VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 103667

Last change on this file since 103667 was 103613, checked in by vboxsync, 12 months ago

VMM/IEM: Experimental code for emitting native code instead of calling AImpl helper, experimenting on: xor reg32,reg32. bugref:10376

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 178.6 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 103613 2024-02-29 13:01:56Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 103613 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks:
# Python 3 merged 'long' into 'int', so alias the name to keep any code that
# still says 'long' working on both interpreter generations.
if sys.version_info[0] >= 3:
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;
55
## Primitive C types used for threaded function parameters.
g_kdTypeInfo = {
    # type name:          (cBits, fSigned, C-type      )
    'int8_t':             (    8,    True, 'int8_t',   ),
    'int16_t':            (   16,    True, 'int16_t',  ),
    'int32_t':            (   32,    True, 'int32_t',  ),
    'int64_t':            (   64,    True, 'int64_t',  ),
    'uint4_t':            (    4,   False, 'uint8_t',  ),
    'uint8_t':            (    8,   False, 'uint8_t',  ),
    'uint16_t':           (   16,   False, 'uint16_t', ),
    'uint32_t':           (   32,   False, 'uint32_t', ),
    'uint64_t':           (   64,   False, 'uint64_t', ),
    'uintptr_t':          (   64,   False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
    'bool':               (    1,   False, 'bool',     ),
    'IEMMODE':            (    2,   False, 'IEMMODE',  ),
};

# Only for getTypeBitCount/variables.
g_kdTypeInfo2 = {
    'RTFLOAT32U':         (       32, False, 'RTFLOAT32U',      ),
    'RTFLOAT64U':         (       64, False, 'RTFLOAT64U',      ),
    'RTUINT64U':          (       64, False, 'RTUINT64U',       ),
    'RTGCPTR':            (       64, False, 'RTGCPTR',         ),
    'RTPBCD80U':          (       80, False, 'RTPBCD80U',       ),
    'RTFLOAT80U':         (       80, False, 'RTFLOAT80U',      ),
    'IEMFPURESULT':       (    80+16, False, 'IEMFPURESULT',    ),
    'IEMFPURESULTTWO':    ( 80+16+80, False, 'IEMFPURESULTTWO', ),
    'RTUINT128U':         (      128, False, 'RTUINT128U',      ),
    'X86XMMREG':          (      128, False, 'X86XMMREG',       ),
    'IEMSSERESULT':       (   128+32, False, 'IEMSSERESULT',    ),
    'IEMMEDIAF2XMMSRC':   (      256, False, 'IEMMEDIAF2XMMSRC',),
    'RTUINT256U':         (      256, False, 'RTUINT256U',      ),
    'IEMPCMPISTRXSRC':    (      256, False, 'IEMPCMPISTRXSRC', ),
    'IEMPCMPESTRXSRC':    (      384, False, 'IEMPCMPESTRXSRC', ),
}; #| g_kdTypeInfo; - requires 3.9
g_kdTypeInfo2.update(g_kdTypeInfo);

def getTypeBitCount(sType):
    """
    Translates a type to size in bits.

    Unknown types are reported on stdout and assumed to be pointer sized
    (64 bits); pointer types ('*' in the name or a 'P' prefix) are likewise
    64 bits wide (see the uintptr_t ASSUMES note above).
    """
    if sType in g_kdTypeInfo2:
        return g_kdTypeInfo2[sType][0];
    # Use startswith rather than sType[0] so an empty type name cannot raise
    # IndexError; it falls through to the unknown-type path instead.
    if '*' in sType or sType.startswith('P'):
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,));
    return 64;
103
## Maps pVCpu->iem.s field names to the type used when they are referenced
## from microcode (see analyzeReferenceToType).  A None entry marks a field
## that must not be referenced by threaded functions.
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte': ( None, ),
    'cbInstrBuf': ( None, ),
    'pbInstrBuf': ( None, ),
    'uInstrBufPc': ( None, ),
    'cbInstrBufTotal': ( None, ),
    'offCurInstrStart': ( None, ),
    'cbOpcode': ( None, ),
    'offOpcode': ( None, ),
    'offModRm': ( None, ),
    # Okay ones.
    'fPrefixes': ( 'uint32_t', ),
    'uRexReg': ( 'uint8_t', ),
    'uRexB': ( 'uint8_t', ),
    'uRexIndex': ( 'uint8_t', ),
    'iEffSeg': ( 'uint8_t', ),
    'enmEffOpSize': ( 'IEMMODE', ),
    'enmDefAddrMode': ( 'IEMMODE', ),
    'enmEffAddrMode': ( 'IEMMODE', ),
    'enmDefOpSize': ( 'IEMMODE', ),
    'idxPrefix': ( 'uint8_t', ),
    'uVex3rdReg': ( 'uint8_t', ),
    'uVexLength': ( 'uint8_t', ),
    'fEvexStuff': ( 'uint8_t', ),
    'uFpuOpcode': ( 'uint16_t', ),
};
131
## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
## Classifies how a conditional branch finishes the instruction.
## @{
g_ksFinishAnnotation_Advance = 'Advance';
g_ksFinishAnnotation_RelJmp = 'RelJmp';
g_ksFinishAnnotation_SetJmp = 'SetJmp';
g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
## @}
139
140
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference as it appears in the original code.
        self.sOrgRef     = sOrgRef;
        ## Normalized name, i.e. sOrgRef with all whitespace removed so that
        ## spaces inside macro invocations and the like do not matter.
        if sStdRef:
            self.sStdRef = sStdRef;
        else:
            self.sStdRef = ''.join(sOrgRef.split());
        ## Set when a custom sStdRef was supplied, indicating that sOrgRef may
        ## not match the parameter.
        self.fCustomRef  = sStdRef is not None;
        ## The type (typically derived).
        self.sType       = sType;
        ## The statement making the reference.
        self.oStmt       = oStmt;
        ## The parameter containing the reference, None if implicit.
        self.iParam      = iParam;
        ## The offset in the parameter of the reference.
        self.offParam    = offParam;

        ## The variable name used in the threaded function.
        self.sNewName    = 'x';
        ## The threaded function parameter this reference is packed into.
        self.iNewParam   = 99;
        ## The bit offset within iNewParam.
        self.offNewParam = 1024;
169
170class ThreadedFunctionVariation(object):
171 """ Threaded function variation. """
172
    ## @name Variations.
    ## These variations will match translation block selection/distinctions as well.
    ## @{
    # pylint: disable=line-too-long
    ksVariation_Default           = '';                  ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
    ksVariation_16                = '_16';               ##< 16-bit mode code (386+).
    ksVariation_16f               = '_16f';              ##< 16-bit mode code (386+), check+clear eflags.
    ksVariation_16_Jmp            = '_16_Jmp';           ##< 16-bit mode code (386+), conditional jump taken.
    ksVariation_16f_Jmp           = '_16f_Jmp';          ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
    ksVariation_16_NoJmp          = '_16_NoJmp';         ##< 16-bit mode code (386+), conditional jump not taken.
    ksVariation_16f_NoJmp         = '_16f_NoJmp';        ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_16_Addr32         = '_16_Addr32';        ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
    ksVariation_16f_Addr32        = '_16f_Addr32';       ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
    ksVariation_16_Pre386         = '_16_Pre386';        ##< 16-bit mode code, pre-386 CPU target.
    ksVariation_16f_Pre386        = '_16f_Pre386';       ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
    ksVariation_16_Pre386_Jmp     = '_16_Pre386_Jmp';    ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
    ksVariation_16f_Pre386_Jmp    = '_16f_Pre386_Jmp';   ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
    ksVariation_16_Pre386_NoJmp   = '_16_Pre386_NoJmp';  ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
    ksVariation_16f_Pre386_NoJmp  = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
    ksVariation_32                = '_32';               ##< 32-bit mode code (386+).
    ksVariation_32f               = '_32f';              ##< 32-bit mode code (386+), check+clear eflags.
    ksVariation_32_Jmp            = '_32_Jmp';           ##< 32-bit mode code (386+), conditional jump taken.
    ksVariation_32f_Jmp           = '_32f_Jmp';          ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
    ksVariation_32_NoJmp          = '_32_NoJmp';         ##< 32-bit mode code (386+), conditional jump not taken.
    ksVariation_32f_NoJmp         = '_32f_NoJmp';        ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_32_Flat           = '_32_Flat';          ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
    ksVariation_32f_Flat          = '_32f_Flat';         ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
    ksVariation_32_Addr16         = '_32_Addr16';        ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
    ksVariation_32f_Addr16        = '_32f_Addr16';       ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
    ksVariation_64                = '_64';               ##< 64-bit mode code.
    ksVariation_64f               = '_64f';              ##< 64-bit mode code, check+clear eflags.
    ksVariation_64_Jmp            = '_64_Jmp';           ##< 64-bit mode code, conditional jump taken.
    ksVariation_64f_Jmp           = '_64f_Jmp';          ##< 64-bit mode code, check+clear eflags, conditional jump taken.
    ksVariation_64_NoJmp          = '_64_NoJmp';         ##< 64-bit mode code, conditional jump not taken.
    ksVariation_64f_NoJmp         = '_64f_NoJmp';        ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
    ksVariation_64_FsGs           = '_64_FsGs';          ##< 64-bit mode code, with memory accesses via FS or GS.
    ksVariation_64f_FsGs          = '_64f_FsGs';         ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
    ksVariation_64_Addr32         = '_64_Addr32';        ##< 64-bit mode code, address size prefixed to 32-bit addressing.
    ksVariation_64f_Addr32        = '_64f_Addr32';       ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
    # pylint: enable=line-too-long

    ## All the variations.
    kasVariations = (
        ksVariation_Default,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Jmp,
        ksVariation_16f_Jmp,
        ksVariation_16_NoJmp,
        ksVariation_16f_NoJmp,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_16_Pre386_Jmp,
        ksVariation_16f_Pre386_Jmp,
        ksVariation_16_Pre386_NoJmp,
        ksVariation_16f_Pre386_NoJmp,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Jmp,
        ksVariation_32f_Jmp,
        ksVariation_32_NoJmp,
        ksVariation_32f_NoJmp,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_Jmp,
        ksVariation_64f_Jmp,
        ksVariation_64_NoJmp,
        ksVariation_64f_NoJmp,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations for blocks without memory addressing.
    kasVariationsWithoutAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Variations without memory addressing, excluding the pre-386 targets.
    kasVariationsWithoutAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Variations without memory addressing, excluding pre-386 and 64-bit.
    kasVariationsWithoutAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
    );
    ## Variations without memory addressing, excluding 64-bit.
    kasVariationsWithoutAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
    );
    ## Variations without memory addressing, 64-bit only.
    kasVariationsWithoutAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
    );
    ## Variations for blocks with memory addressing.
    kasVariationsWithAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations with memory addressing, excluding the pre-386 targets.
    kasVariationsWithAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations with memory addressing, excluding pre-386 and 64-bit.
    kasVariationsWithAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Variations with memory addressing, excluding 64-bit.
    kasVariationsWithAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Variations with memory addressing, 64-bit only.
    kasVariationsWithAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations for pre-386 only targets.
    kasVariationsOnlyPre386 = (
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
    );
    ## The order in which the variations are emitted.
    kasVariationsEmitOrder = (
        ksVariation_Default,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_Jmp,
        ksVariation_64f_Jmp,
        ksVariation_64_NoJmp,
        ksVariation_64f_NoJmp,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Jmp,
        ksVariation_32f_Jmp,
        ksVariation_32_NoJmp,
        ksVariation_32f_NoJmp,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Jmp,
        ksVariation_16f_Jmp,
        ksVariation_16_NoJmp,
        ksVariation_16f_NoJmp,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_16_Pre386_Jmp,
        ksVariation_16f_Pre386_Jmp,
        ksVariation_16_Pre386_NoJmp,
        ksVariation_16f_Pre386_NoJmp,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
399 kdVariationNames = {
400 ksVariation_Default: 'defer-to-cimpl',
401 ksVariation_16: '16-bit',
402 ksVariation_16f: '16-bit w/ eflag checking and clearing',
403 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
404 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
405 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
406 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
407 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
408 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
409 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
410 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
411 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
412 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
413 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump taken',
414 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
415 ksVariation_32: '32-bit',
416 ksVariation_32f: '32-bit w/ eflag checking and clearing',
417 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
418 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
419 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
420 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
421 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
422 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
423 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
424 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
425 ksVariation_64: '64-bit',
426 ksVariation_64f: '64-bit w/ eflag checking and clearing',
427 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
428 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
429 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
430 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
431 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
432 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
433 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
434 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
435 };
    ## Variations that check and clear EFLAGS.
    kdVariationsWithEflagsCheckingAndClearing = {
        ksVariation_16f: True,
        ksVariation_16f_Jmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Addr32: True,
        ksVariation_16f_Pre386: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f: True,
        ksVariation_32f_Jmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_32f_Flat: True,
        ksVariation_32f_Addr16: True,
        ksVariation_64f: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## 64-bit variations without eflags checking and clearing.
    kdVariationsOnly64NoFlags = {
        ksVariation_64: True,
        ksVariation_64_Jmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_64_FsGs: True,
        ksVariation_64_Addr32: True,
    };
    ## 64-bit variations with eflags checking and clearing.
    kdVariationsOnly64WithFlags = {
        ksVariation_64f: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## Pre-386 variations without eflags checking and clearing.
    kdVariationsOnlyPre386NoFlags = {
        ksVariation_16_Pre386: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
    };
    ## Pre-386 variations with eflags checking and clearing.
    kdVariationsOnlyPre386WithFlags = {
        ksVariation_16f_Pre386: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
    };
    ## Variations where general memory accesses use flat addressing.
    kdVariationsWithFlatAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations where stack accesses use flat addressing.
    kdVariationsWithFlatStackAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations where stack accesses use flat 64-bit addressing.
    kdVariationsWithFlat64StackAddress = {
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations using 16-bit effective addressing.
    kdVariationsWithFlatAddr16 = {
        ksVariation_16: True,
        ksVariation_16f: True,
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32_Addr16: True,
        ksVariation_32f_Addr16: True,
    };
    ## Variations using 32-bit effective addressing, excluding 64-bit mode.
    kdVariationsWithFlatAddr32No64 = {
        ksVariation_16_Addr32: True,
        ksVariation_16f_Addr32: True,
        ksVariation_32: True,
        ksVariation_32f: True,
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
    };
    ## 64-bit variations with memory addressing.
    kdVariationsWithAddressOnly64 = {
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations for conditional (Jmp/NoJmp) code flow.
    kdVariationsWithConditional = {
        ksVariation_16_Jmp: True,
        ksVariation_16_NoJmp: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_32_Jmp: True,
        ksVariation_32_NoJmp: True,
        ksVariation_64_Jmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_16f_Jmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f_Jmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
    };
    ## The jump-not-taken subset of the conditional variations.
    kdVariationsWithConditionalNoJmp = {
        ksVariation_16_NoJmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_32_NoJmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_64f_NoJmp: True,
    };
    ## All the pre-386 variations.
    kdVariationsOnlyPre386 = {
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
    };
    ## @}

    ## IEM_CIMPL_F_XXX flags that we know.
    ## The value indicates whether it terminates the TB or not. The goal is to
    ## improve the recompiler so all but END_TB will be False.
    ##
    ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
    kdCImplFlags = {
        'IEM_CIMPL_F_MODE': False,
        'IEM_CIMPL_F_BRANCH_DIRECT': False,
        'IEM_CIMPL_F_BRANCH_INDIRECT': False,
        'IEM_CIMPL_F_BRANCH_RELATIVE': False,
        'IEM_CIMPL_F_BRANCH_FAR': True,
        'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
        # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
        'IEM_CIMPL_F_BRANCH_STACK': False,
        'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
        'IEM_CIMPL_F_RFLAGS': False,
        'IEM_CIMPL_F_INHIBIT_SHADOW': False,
        'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
        'IEM_CIMPL_F_STATUS_FLAGS': False,
        'IEM_CIMPL_F_VMEXIT': False,
        'IEM_CIMPL_F_FPU': False,
        'IEM_CIMPL_F_REP': False,
        'IEM_CIMPL_F_IO': False,
        'IEM_CIMPL_F_END_TB': True,
        'IEM_CIMPL_F_XCPT': True,
        'IEM_CIMPL_F_CALLS_CIMPL': False,
        'IEM_CIMPL_F_CALLS_AIMPL': False,
        'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
    };
598
    def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
        """ Instantiates the ksVariation_Xxxx variation of the given threaded function. """
        ## The threaded function this is a variation of.
        self.oParent = oThreadedFunction # type: ThreadedFunction
        ##< ksVariation_Xxxx.
        self.sVariation = sVariation

        ## Threaded function parameter references.
        self.aoParamRefs = [] # type: List[ThreadedParamRef]
        ## Unique parameter references.
        self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
        ## Minimum number of parameters to the threaded function.
        self.cMinParams = 0;

        ## List/tree of statements for the threaded function.
        self.aoStmtsForThreadedFunction = [] # type: List[McStmt]

        ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
        self.iEnumValue = -1;

        ## Native recompilation details for this variation.
        self.oNativeRecomp = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
625
626 def getThreadedFunctionName(self):
627 sName = self.oParent.oMcBlock.sFunction;
628 if sName.startswith('iemOp_'):
629 sName = sName[len('iemOp_'):];
630 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
631
632 def getNativeFunctionName(self):
633 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
634
635 def getLivenessFunctionName(self):
636 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
637
638 def getShortName(self):
639 sName = self.oParent.oMcBlock.sFunction;
640 if sName.startswith('iemOp_'):
641 sName = sName[len('iemOp_'):];
642 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
643
644 def getThreadedFunctionStatisticsName(self):
645 sName = self.oParent.oMcBlock.sFunction;
646 if sName.startswith('iemOp_'):
647 sName = sName[len('iemOp_'):];
648
649 sVarNm = self.sVariation;
650 if sVarNm:
651 if sVarNm.startswith('_'):
652 sVarNm = sVarNm[1:];
653 if sVarNm.endswith('_Jmp'):
654 sVarNm = sVarNm[:-4];
655 sName += '_Jmp';
656 elif sVarNm.endswith('_NoJmp'):
657 sVarNm = sVarNm[:-6];
658 sName += '_NoJmp';
659 else:
660 sVarNm = 'DeferToCImpl';
661
662 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
663
664 def isWithFlagsCheckingAndClearingVariation(self):
665 """
666 Checks if this is a variation that checks and clears EFLAGS.
667 """
668 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
669
670 #
671 # Analysis and code morphing.
672 #
673
    def raiseProblem(self, sMessage):
        """ Raises a problem. Delegates to the parent ThreadedFunction so the
            error gets the function's source location attached. """
        self.oParent.raiseProblem(sMessage);
677
    def warning(self, sMessage):
        """ Emits a warning. Delegates to the parent ThreadedFunction. """
        self.oParent.warning(sMessage);
681
    def analyzeReferenceToType(self, sRef):
        """
        Translates a variable or structure reference to a type.
        Returns type name.
        Raises exception if unable to figure it out.
        """
        # The type is guessed from the hungarian prefix of the reference;
        # the checks are ordered so longer/more specific prefixes win.
        ch0 = sRef[0];
        if ch0 == 'u':
            if sRef.startswith('u32'):
                return 'uint32_t';
            if sRef.startswith('u8') or sRef == 'uReg':
                return 'uint8_t';
            if sRef.startswith('u64'):
                return 'uint64_t';
            if sRef.startswith('u16'):
                return 'uint16_t';
        elif ch0 == 'b':
            return 'uint8_t';
        elif ch0 == 'f':
            return 'bool';
        elif ch0 == 'i':
            if sRef.startswith('i8'):
                return 'int8_t';
            if sRef.startswith('i16'):
                return 'int16_t';
            if sRef.startswith('i32'):
                return 'int32_t';
            if sRef.startswith('i64'):
                return 'int64_t';
            # Register indexes are passed as bytes.
            if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
                return 'uint8_t';
        elif ch0 == 'p':
            # Plain pointer variables; structure member chains are looked up
            # in g_kdIemFieldToType below.
            if sRef.find('-') < 0:
                return 'uintptr_t';
            if sRef.startswith('pVCpu->iem.s.'):
                sField = sRef[len('pVCpu->iem.s.') : ];
                if sField in g_kdIemFieldToType:
                    if g_kdIemFieldToType[sField][0]:
                        return g_kdIemFieldToType[sField][0];
        elif ch0 == 'G' and sRef.startswith('GCPtr'):
            return 'uint64_t';
        elif ch0 == 'e':
            if sRef == 'enmEffOpSize':
                return 'IEMMODE';
        elif ch0 == 'o':
            if sRef.startswith('off32'):
                return 'uint32_t';
        elif sRef == 'cbFrame':  # enter
            return 'uint16_t';
        elif sRef == 'cShift': ## @todo risky
            return 'uint8_t';

        self.raiseProblem('Unknown reference: %s' % (sRef,));
        return None; # Shut up pylint 2.16.2.
736
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.
        Returns the function pointer type name; raises a problem if unknown.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple?
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first.
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    # The type precedes the argument name in the definition list.
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check for a line that includes the word and looks like a variable declaration.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # Turn 'const IEMOPXXX *' into the 'PCIEMOPXXX' pointer typedef form.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx:
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            # The member name ends with the operand width, e.g. pfnNormalU16.
            offBits = sMember.rfind('U') + 1;
            if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
788
    def analyze8BitGRegStmt(self, oStmt):
        """
        Gets the 8-bit general purpose register access details of the given statement.
        ASSUMES the statement is one accessing an 8-bit GREG.
        Returns a (idxReg, sOrgExpr, sStdRef) tuple: the parameter index of the
        register reference, the rewritten register expression, and a
        standardized name for it.
        """
        # For fetch/ref/to-local MCs the register operand is the second parameter.
        idxReg = 0;
        if ( oStmt.sName.find('_FETCH_') > 0
            or oStmt.sName.find('_REF_') > 0
            or oStmt.sName.find('_TO_LOCAL') > 0):
            idxReg = 1;

        sRegRef = oStmt.asParams[idxReg];
        if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
            # Crack the macro invocation into (macro, pVCpu, bRm) pieces and
            # re-emit it using the _EX8 form of the macro.
            asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
            if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
                self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
            sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
        else:
            # Plain register index: adjust for the AH/CH/DH/BH encoding when no REX prefix.
            sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);

        if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
        elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
        elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
        elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
        elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
        elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
        else:
            self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
                         % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
            sStdRef = 'bOther8Ex';

        #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
        #      % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
        return (idxReg, sOrgExpr, sStdRef);
823
824
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access. Only applied to access
    ## via ES, DS and SS. FS, GS and CS gets the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ## Each value is (index of the iEffSeg parameter, or -1 if none; name of
    ## the FLAT MC replacement) - see analyzeMorphStmtForThreaded.
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
        'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
        'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
        'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
        'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
        'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
        'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
        'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
        'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
        'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
        'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
        'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
        'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
        'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
        'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
        'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };

    ## Maps PUSH/POP MCs to their flat replacements.
    ## Each value is a pair indexed by int(variation uses a flat 64-bit stack
    ## address): [0] = flat 32-bit variant, [1] = flat 64-bit variant.  Entries
    ## mapping to themselves have no flat variant for that stack width.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
        'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
        'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
    };

    ## Maps threaded variation codes to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_XX
    ## MC that replaces IEM_MC_CALC_RM_EFF_ADDR for that variation.
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
957
    def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.

        dState is a mutable dictionary shared with the recursive calls made on
        conditional branches; it is currently only used to schedule the
        insertion of IEM_MC_ASSERT_EFLAGS statements.  iLevel is the
        conditional nesting depth of aoStmts (0 for the top level).
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that are purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse order.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif (    oNewStmt.sName
                       in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
                           'IEM_MC_REL_JMP_S8_AND_FINISH',  'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
                           'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
                    if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                              'IEM_MC_SET_RIP_U64_AND_FINISH', ):
                        oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
                        and self.sVariation not in self.kdVariationsOnlyPre386):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
                    if self.sVariation in self.kdVariationsOnly64NoFlags:
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in self.kdVariationsOnly64WithFlags:
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                    # This is making the wrong branch of conditionals break out of the TB.
                    if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
                        sExitTbStatus = 'VINF_SUCCESS';
                        if self.sVariation in self.kdVariationsWithConditional:
                            if self.sVariation in self.kdVariationsWithConditionalNoJmp:
                                if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                    sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                            elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                        oNewStmt.asParams.append(sExitTbStatus);

                    # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
                    if 'IEM_MC_ASSERT_EFLAGS' in dState:
                        aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
                                               iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
                        del dState['IEM_MC_ASSERT_EFLAGS'];

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.idxFn += 1;
                    oNewStmt.idxParams += 1;
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatStackAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
                                                                                     self.kdVariationsWithFlat64StackAddress)];

                # Add EFLAGS usage annotations to relevant MCs.
                elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS', 'IEM_MC_FETCH_EFLAGS'):
                    oInstruction = self.oParent.oMcBlock.oInstruction;
                    oNewStmt.sName += '_EX';
                    oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle());   # Shall crash and burn if oInstruction is
                    oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.

                    # For IEM_MC_REF_EFLAGS we need to emit an assertion MC before the ..._FINISH
                    if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
                        dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
                                                                                    iParamRef, iLevel + 1);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
                                                                                          dState, iParamRef, iLevel + 1);

            # Insert an MC so we can assert the correctness of modified flags annotations
            # on IEM_MC_REF_EFLAGS if it goes out of scope.
            if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
                aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
                del dState['IEM_MC_ASSERT_EFLAGS'];

        return (aoThreadedStmts, iParamRef);
1131
1132
    def analyzeConsolidateThreadedParamRefs(self):
        """
        Consolidate threaded function parameter references into a dictionary
        with lists of the references to each variable/field.

        Populates self.dParamRefs (sStdRef -> list of references), assigns
        every reference a unique sNewName, packs the references into 64-bit
        parameters (setting iNewParam/offNewParam on each) and stores the
        resulting parameter count in self.cMinParams.  Returns True.
        """
        # Gather unique parameters.
        self.dParamRefs = {};
        for oRef in self.aoParamRefs:
            if oRef.sStdRef not in self.dParamRefs:
                self.dParamRefs[oRef.sStdRef] = [oRef,];
            else:
                self.dParamRefs[oRef.sStdRef].append(oRef);

        # Generate names for them for use in the threaded function.
        dParamNames = {};
        for sName, aoRefs in self.dParamRefs.items():
            # Morph the reference expression into a name.
            if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
            elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
            elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
            elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
            elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
            elif sName.find('.') >= 0 or sName.find('->') >= 0:
                sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
            else:
                sName += 'P';

            # Ensure it's unique.
            # NOTE(review): if all of <name>0..<name>9 are already taken the name
            # stays ambiguous - assumed to never happen in practice.
            if sName in dParamNames:
                for i in range(10):
                    if sName + str(i) not in dParamNames:
                        sName += str(i);
                        break;
            dParamNames[sName] = True;

            # Update all the references.
            for oRef in aoRefs:
                oRef.sNewName = sName;

        # Organize them by size too for the purpose of optimize them.
        # (Keys are bit counts taken from g_kdTypeInfo, 64 for pointers.)
        dBySize = {} # type: Dict[int, List[str]]
        for sStdRef, aoRefs in self.dParamRefs.items():
            if aoRefs[0].sType[0] != 'P':
                cBits = g_kdTypeInfo[aoRefs[0].sType][0];
                assert(cBits <= 64);
            else:
                cBits = 64;

            if cBits not in dBySize:
                dBySize[cBits] = [sStdRef,]
            else:
                dBySize[cBits].append(sStdRef);

        # Pack the parameters as best as we can, starting with the largest ones
        # and ASSUMING a 64-bit parameter size.
        self.cMinParams = 0;
        offNewParam = 0;
        for cBits in sorted(dBySize.keys(), reverse = True):
            for sStdRef in dBySize[cBits]:
                if offNewParam == 0 or offNewParam + cBits > 64:
                    self.cMinParams += 1;
                    offNewParam = cBits;
                else:
                    offNewParam += cBits;
                assert(offNewParam <= 64);

                for oRef in self.dParamRefs[sStdRef]:
                    oRef.iNewParam = self.cMinParams - 1;
                    oRef.offNewParam = offNewParam - cBits;

        # Currently there are a few that requires 4 parameters, list these so we can figure out why:
        if self.cMinParams >= 4:
            print('debug: cMinParams=%s cRawParams=%s - %s:%d'
                  % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));

        return True;
1209
1210 ksHexDigits = '0123456789abcdefABCDEF';
1211
    def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
        """
        Scans the statements for things that have to be passed on to the threaded
        function (populates self.aoParamRefs).

        Note! The traversal order here must be matched exactly by
              analyzeMorphStmtForThreaded, which walks self.aoParamRefs in
              parallel with the statements.
        """
        for oStmt in aoStmts:
            # Some statements we can skip altogether.
            if isinstance(oStmt, iai.McCppPreProc):
                continue;
            if oStmt.isCppStmt() and oStmt.fDecode:
                continue;
            if oStmt.sName in ('IEM_MC_BEGIN',):
                continue;

            if isinstance(oStmt, iai.McStmtVar):
                if oStmt.sValue is None:
                    continue;
                aiSkipParams = { 0: True, 1: True, 3: True };
            else:
                aiSkipParams = {};

            # Several statements have implicit parameters and some have different parameters.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                               'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
                               'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
                               'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
                               'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
                self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));

            if (    oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
                and self.sVariation not in self.kdVariationsOnlyPre386):
                self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));

            if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                # This is being pretty presumptive about bRm always being the RM byte...
                assert len(oStmt.asParams) == 3;
                assert oStmt.asParams[1] == 'bRm';

                if self.sVariation in self.kdVariationsWithFlatAddr16:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
                                                             'uint16_t', oStmt, sStdRef = 'u16Disp'));
                elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                else:
                    assert self.sVariation in self.kdVariationsWithAddressOnly64;
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t', oStmt, sStdRef = 'bRmEx'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
                                                             'uint4_t', oStmt, sStdRef = 'cbInstr'));
                aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.

            # 8-bit register accesses needs to have their index argument reworked to take REX into account.
            if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
                (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.

            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
            if (    self.sVariation in self.kdVariationsWithFlatAddress
                and oStmt.sName in self.kdMemMcToFlatInfo
                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
            # function pointer or function table pointer for it to work.
            if isinstance(oStmt, iai.McStmtCall):
                if oStmt.sFn[0] == 'p':
                    self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
                elif (    oStmt.sFn[0] != 'i'
                      and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
                      and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
                    self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
                aiSkipParams[oStmt.idxFn] = True;

                # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    assert oStmt.idxFn == 2;
                    aiSkipParams[0] = True;

            # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
            if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
                aiSkipParams[0] = True;


            # Check all the parameters for bogus references.
            for iParam, sParam in enumerate(oStmt.asParams):
                if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
                    # The parameter may contain a C expression, so we have to try
                    # extract the relevant bits, i.e. variables and fields while
                    # ignoring operators and parentheses.
                    offParam = 0;
                    while offParam < len(sParam):
                        # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
                        ch = sParam[offParam];
                        if ch.isalpha() or ch == '_':
                            offStart = offParam;
                            offParam += 1;
                            while offParam < len(sParam):
                                ch = sParam[offParam];
                                if not ch.isalnum() and ch != '_' and ch != '.':
                                    # NOTE(review): the lookahead below assumes a lone '-' never ends
                                    # the parameter string (would index past the end) - TODO confirm.
                                    if ch != '-' or sParam[offParam + 1] != '>':
                                        # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
                                        if (    ch == '('
                                            and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
                                            offParam += len('(pVM)->') - 1;
                                        else:
                                            break;
                                    offParam += 1;
                                offParam += 1;
                            sRef = sParam[offStart : offParam];

                            # For register references, we pass the full register indexes instead as macros
                            # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
                            # threaded function will be more efficient if we just pass the register index
                            # as a 4-bit param.
                            if (   sRef.startswith('IEM_GET_MODRM')
                                or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;
                                self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
                                                                         oStmt, iParam, offStart));

                            # We can skip known variables.
                            elif sRef in self.oParent.dVariables:
                                pass;

                            # Skip certain macro invocations.
                            elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
                                          'IEM_GET_GUEST_CPU_FEATURES',
                                          'IEM_IS_GUEST_CPU_AMD',
                                          'IEM_IS_16BIT_CODE',
                                          'IEM_IS_32BIT_CODE',
                                          'IEM_IS_64BIT_CODE',
                                          ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;

                                # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
                                if sRef not in ('IEM_IS_GUEST_CPU_AMD',
                                                'IEM_IS_16BIT_CODE',
                                                'IEM_IS_32BIT_CODE',
                                                'IEM_IS_64BIT_CODE',
                                                ):
                                    offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                    if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
                                        offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
                                        while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
                                            offParam += 1;

                            # Skip constants, globals, types (casts), sizeof and macros.
                            elif (   sRef.startswith('IEM_OP_PRF_')
                                  or sRef.startswith('IEM_ACCESS_')
                                  or sRef.startswith('IEMINT_')
                                  or sRef.startswith('X86_GREG_')
                                  or sRef.startswith('X86_SREG_')
                                  or sRef.startswith('X86_EFL_')
                                  or sRef.startswith('X86_FSW_')
                                  or sRef.startswith('X86_FCW_')
                                  or sRef.startswith('X86_XCPT_')
                                  or sRef.startswith('IEMMODE_')
                                  or sRef.startswith('IEM_F_')
                                  or sRef.startswith('IEM_CIMPL_F_')
                                  or sRef.startswith('g_')
                                  or sRef.startswith('iemAImpl_')
                                  or sRef.startswith('kIemNativeGstReg_')
                                  or sRef.startswith('RT_ARCH_VAL_')
                                  or sRef in ( 'int8_t',    'int16_t',    'int32_t',    'int64_t',
                                               'INT8_C',    'INT16_C',    'INT32_C',    'INT64_C',
                                               'uint8_t',   'uint16_t',   'uint32_t',   'uint64_t',
                                               'UINT8_C',   'UINT16_C',   'UINT32_C',   'UINT64_C',
                                               'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
                                               'INT8_MAX',  'INT16_MAX',  'INT32_MAX',  'INT64_MAX',
                                               'INT8_MIN',  'INT16_MIN',  'INT32_MIN',  'INT64_MIN',
                                               'sizeof',    'NOREF',      'RT_NOREF',   'IEMMODE_64BIT',
                                               'RT_BIT_32', 'RT_BIT_64',  'true',       'false',
                                               'NIL_RTGCPTR',) ):
                                pass;

                            # Anything else that looks like a local variable (non-field) or an
                            # IEMCPU decoder field must be passed to the threaded function.
                            elif (   ( '.' not in sRef
                                     and '-' not in sRef
                                     and sRef not in ('pVCpu', ) )
                                  or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
                                self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
                                                                         oStmt, iParam, offStart));
                        # Number.
                        elif ch.isdigit():
                            if (    ch == '0'
                                and offParam + 2 <= len(sParam)
                                and sParam[offParam + 1] in 'xX'
                                and sParam[offParam + 2] in self.ksHexDigits ):
                                offParam += 2;
                                while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
                                    offParam += 1;
                            else:
                                while offParam < len(sParam) and sParam[offParam].isdigit():
                                    offParam += 1;
                        # Comment?
                        elif (    ch == '/'
                              and offParam + 4 <= len(sParam)
                              and sParam[offParam + 1] == '*'):
                            offParam += 2;
                            offNext = sParam.find('*/', offParam);
                            if offNext < offParam:
                                self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
                            offParam = offNext + 2;
                        # Whatever else.
                        else:
                            offParam += 1;

            # Traverse the branches of conditionals.
            if isinstance(oStmt, iai.McStmtCond):
                self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
                self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
        return True;
1447
1448 def analyzeVariation(self, aoStmts):
1449 """
1450 2nd part of the analysis, done on each variation.
1451
1452 The variations may differ in parameter requirements and will end up with
1453 slightly different MC sequences. Thus this is done on each individually.
1454
1455 Returns dummy True - raises exception on trouble.
1456 """
1457 # Now scan the code for variables and field references that needs to
1458 # be passed to the threaded function because they are related to the
1459 # instruction decoding.
1460 self.analyzeFindThreadedParamRefs(aoStmts);
1461 self.analyzeConsolidateThreadedParamRefs();
1462
1463 # Morph the statement stream for the block into what we'll be using in the threaded function.
1464 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1465 if iParamRef != len(self.aoParamRefs):
1466 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1467
1468 return True;
1469
    def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
        """
        Produces generic C++ statements that emits a call to the thread function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm is the name of the variable with the threaded function
        to call. This is for the case where all the variations have the same
        parameters and only the threaded function number differs.

        Returns a list of iai.McCpp* statement objects.
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            # Each 64-bit parameter may pack several source references at
            # different bit offsets; OR the shifted fragments together.
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        #             emit this mode check from the compilation loop.  On the
        #             plus side, this means we eliminate unnecessary call at
        #             end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        #    aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        #                                 cchIndent = cchIndent));

        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        # Conditional jumps only need the branched-state update on the taken
        # path; the NoJmp variations are excluded here.
        if (   asTbBranchedFlags
            and (   'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
                 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1545
1546
1547class ThreadedFunction(object):
1548 """
1549 A threaded function.
1550 """
1551
    def __init__(self, oMcBlock: iai.McBlock) -> None:
        """ Wraps the given MC block; most state is only filled in by analyze(). """
        self.oMcBlock = oMcBlock # type: iai.McBlock
        # The remaining fields are only useful after analyze() has been called:
        ## Variations for this block. There is at least one.
        self.aoVariations = [] # type: List[ThreadedFunctionVariation]
        ## Variation dictionary containing the same as aoVariations.
        self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
        ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
        self.dVariables = {} # type: Dict[str, iai.McStmtVar]
        ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
        ## and those determined by analyzeCodeOperation().
        self.dsCImplFlags = {} # type: Dict[str, bool]
        ## The unique sub-name for this threaded function.
        self.sSubName = '';
        #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
        # self.sSubName = '_%s' % (oMcBlock.iInFunction);
1568
1569 @staticmethod
1570 def dummyInstance():
1571 """ Gets a dummy instance. """
1572 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1573 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1574
1575 def hasWithFlagsCheckingAndClearingVariation(self):
1576 """
1577 Check if there is one or more with flags checking and clearing
1578 variations for this threaded function.
1579 """
1580 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1581 if sVarWithFlags in self.dVariations:
1582 return True;
1583 return False;
1584
1585 #
1586 # Analysis and code morphing.
1587 #
1588
1589 def raiseProblem(self, sMessage):
1590 """ Raises a problem. """
1591 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1592
1593 def error(self, sMessage, oGenerator):
1594 """ Emits an error via the generator object, causing it to fail. """
1595 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1596
1597 def warning(self, sMessage):
1598 """ Emits a warning. """
1599 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1600
    ## Used by analyzeAndAnnotateName for memory MC blocks.
    ## Maps IEM_MC memory statement names to the sub-name suffix used for the
    ## threaded function (e.g. '__mem32'); a trailing 'c' means a constant
    ## store, 'a' an atomic mapping, and 'sx'/'zx' sign/zero extension.
    kdAnnotateNameMemStmts = {
        # Memory fetches.
        'IEM_MC_FETCH_MEM16_U8': '__mem8',
        'IEM_MC_FETCH_MEM32_U8': '__mem8',
        'IEM_MC_FETCH_MEM_D80': '__mem80',
        'IEM_MC_FETCH_MEM_I16': '__mem16',
        'IEM_MC_FETCH_MEM_I32': '__mem32',
        'IEM_MC_FETCH_MEM_I64': '__mem64',
        'IEM_MC_FETCH_MEM_R32': '__mem32',
        'IEM_MC_FETCH_MEM_R64': '__mem64',
        'IEM_MC_FETCH_MEM_R80': '__mem80',
        'IEM_MC_FETCH_MEM_U128': '__mem128',
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': '__mem128',
        'IEM_MC_FETCH_MEM_U128_NO_AC': '__mem128',
        'IEM_MC_FETCH_MEM_U16': '__mem16',
        'IEM_MC_FETCH_MEM_U16_DISP': '__mem16',
        'IEM_MC_FETCH_MEM_U16_SX_U32': '__mem16sx32',
        'IEM_MC_FETCH_MEM_U16_SX_U64': '__mem16sx64',
        'IEM_MC_FETCH_MEM_U16_ZX_U32': '__mem16zx32',
        'IEM_MC_FETCH_MEM_U16_ZX_U64': '__mem16zx64',
        'IEM_MC_FETCH_MEM_U256': '__mem256',
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': '__mem256',
        'IEM_MC_FETCH_MEM_U256_NO_AC': '__mem256',
        'IEM_MC_FETCH_MEM_U32': '__mem32',
        'IEM_MC_FETCH_MEM_U32_DISP': '__mem32',
        'IEM_MC_FETCH_MEM_U32_SX_U64': '__mem32sx64',
        'IEM_MC_FETCH_MEM_U32_ZX_U64': '__mem32zx64',
        'IEM_MC_FETCH_MEM_U64': '__mem64',
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128': '__mem64',
        'IEM_MC_FETCH_MEM_U64_DISP': '__mem64',
        'IEM_MC_FETCH_MEM_U8': '__mem8',
        'IEM_MC_FETCH_MEM_U8_DISP': '__mem8',
        'IEM_MC_FETCH_MEM_U8_SX_U16': '__mem8sx16',
        'IEM_MC_FETCH_MEM_U8_SX_U32': '__mem8sx32',
        'IEM_MC_FETCH_MEM_U8_SX_U64': '__mem8sx64',
        'IEM_MC_FETCH_MEM_U8_ZX_U16': '__mem8zx16',
        'IEM_MC_FETCH_MEM_U8_ZX_U32': '__mem8zx32',
        'IEM_MC_FETCH_MEM_U8_ZX_U64': '__mem8zx64',
        'IEM_MC_FETCH_MEM_XMM': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_NO_AC': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32': '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64': '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64': '__mem128',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',

        # Memory stores.
        'IEM_MC_STORE_MEM_I16_CONST_BY_REF': '__mem16',
        'IEM_MC_STORE_MEM_I32_CONST_BY_REF': '__mem32',
        'IEM_MC_STORE_MEM_I64_CONST_BY_REF': '__mem64',
        'IEM_MC_STORE_MEM_I8_CONST_BY_REF': '__mem8',
        'IEM_MC_STORE_MEM_INDEF_D80_BY_REF': '__mem80',
        'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF': '__mem32',
        'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF': '__mem64',
        'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF': '__mem80',
        'IEM_MC_STORE_MEM_U128': '__mem128',
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE': '__mem128',
        'IEM_MC_STORE_MEM_U128_NO_AC': '__mem128',
        'IEM_MC_STORE_MEM_U16': '__mem16',
        'IEM_MC_STORE_MEM_U16_CONST': '__mem16c',
        'IEM_MC_STORE_MEM_U256': '__mem256',
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX': '__mem256',
        'IEM_MC_STORE_MEM_U256_NO_AC': '__mem256',
        'IEM_MC_STORE_MEM_U32': '__mem32',
        'IEM_MC_STORE_MEM_U32_CONST': '__mem32c',
        'IEM_MC_STORE_MEM_U64': '__mem64',
        'IEM_MC_STORE_MEM_U64_CONST': '__mem64c',
        'IEM_MC_STORE_MEM_U8': '__mem8',
        'IEM_MC_STORE_MEM_U8_CONST': '__mem8c',

        # Memory mappings (read-only, read-write, write-only, atomic).
        'IEM_MC_MEM_MAP_D80_WO': '__mem80',
        'IEM_MC_MEM_MAP_I16_WO': '__mem16',
        'IEM_MC_MEM_MAP_I32_WO': '__mem32',
        'IEM_MC_MEM_MAP_I64_WO': '__mem64',
        'IEM_MC_MEM_MAP_R32_WO': '__mem32',
        'IEM_MC_MEM_MAP_R64_WO': '__mem64',
        'IEM_MC_MEM_MAP_R80_WO': '__mem80',
        'IEM_MC_MEM_MAP_U128_ATOMIC': '__mem128a',
        'IEM_MC_MEM_MAP_U128_RO': '__mem128',
        'IEM_MC_MEM_MAP_U128_RW': '__mem128',
        'IEM_MC_MEM_MAP_U128_WO': '__mem128',
        'IEM_MC_MEM_MAP_U16_ATOMIC': '__mem16a',
        'IEM_MC_MEM_MAP_U16_RO': '__mem16',
        'IEM_MC_MEM_MAP_U16_RW': '__mem16',
        'IEM_MC_MEM_MAP_U16_WO': '__mem16',
        'IEM_MC_MEM_MAP_U32_ATOMIC': '__mem32a',
        'IEM_MC_MEM_MAP_U32_RO': '__mem32',
        'IEM_MC_MEM_MAP_U32_RW': '__mem32',
        'IEM_MC_MEM_MAP_U32_WO': '__mem32',
        'IEM_MC_MEM_MAP_U64_ATOMIC': '__mem64a',
        'IEM_MC_MEM_MAP_U64_RO': '__mem64',
        'IEM_MC_MEM_MAP_U64_RW': '__mem64',
        'IEM_MC_MEM_MAP_U64_WO': '__mem64',
        'IEM_MC_MEM_MAP_U8_ATOMIC': '__mem8a',
        'IEM_MC_MEM_MAP_U8_RO': '__mem8',
        'IEM_MC_MEM_MAP_U8_RW': '__mem8',
        'IEM_MC_MEM_MAP_U8_WO': '__mem8',
    };
    ## Used by analyzeAndAnnotateName for non-memory MC blocks.
    ## Maps IEM_MC register statement names to the sub-name suffix used for the
    ## threaded function (e.g. '__greg32'); consulted only when no entry from
    ## kdAnnotateNameMemStmts matched.
    kdAnnotateNameRegStmts = {
        # General purpose register fetches.
        'IEM_MC_FETCH_GREG_U8': '__greg8',
        'IEM_MC_FETCH_GREG_U8_ZX_U16': '__greg8zx16',
        'IEM_MC_FETCH_GREG_U8_ZX_U32': '__greg8zx32',
        'IEM_MC_FETCH_GREG_U8_ZX_U64': '__greg8zx64',
        'IEM_MC_FETCH_GREG_U8_SX_U16': '__greg8sx16',
        'IEM_MC_FETCH_GREG_U8_SX_U32': '__greg8sx32',
        'IEM_MC_FETCH_GREG_U8_SX_U64': '__greg8sx64',
        'IEM_MC_FETCH_GREG_U16': '__greg16',
        'IEM_MC_FETCH_GREG_U16_ZX_U32': '__greg16zx32',
        'IEM_MC_FETCH_GREG_U16_ZX_U64': '__greg16zx64',
        'IEM_MC_FETCH_GREG_U16_SX_U32': '__greg16sx32',
        'IEM_MC_FETCH_GREG_U16_SX_U64': '__greg16sx64',
        'IEM_MC_FETCH_GREG_U32': '__greg32',
        'IEM_MC_FETCH_GREG_U32_ZX_U64': '__greg32zx64',
        'IEM_MC_FETCH_GREG_U32_SX_U64': '__greg32sx64',
        'IEM_MC_FETCH_GREG_U64': '__greg64',
        'IEM_MC_FETCH_GREG_U64_ZX_U64': '__greg64zx64',
        'IEM_MC_FETCH_GREG_PAIR_U32': '__greg32',
        'IEM_MC_FETCH_GREG_PAIR_U64': '__greg64',

        # General purpose register stores.
        'IEM_MC_STORE_GREG_U8': '__greg8',
        'IEM_MC_STORE_GREG_U16': '__greg16',
        'IEM_MC_STORE_GREG_U32': '__greg32',
        'IEM_MC_STORE_GREG_U64': '__greg64',
        'IEM_MC_STORE_GREG_I64': '__greg64',
        'IEM_MC_STORE_GREG_U8_CONST': '__greg8c',
        'IEM_MC_STORE_GREG_U16_CONST': '__greg16c',
        'IEM_MC_STORE_GREG_U32_CONST': '__greg32c',
        'IEM_MC_STORE_GREG_U64_CONST': '__greg64c',
        'IEM_MC_STORE_GREG_PAIR_U32': '__greg32',
        'IEM_MC_STORE_GREG_PAIR_U64': '__greg64',

        # Segment registers and their bases.
        'IEM_MC_FETCH_SREG_U16': '__sreg16',
        'IEM_MC_FETCH_SREG_ZX_U32': '__sreg32',
        'IEM_MC_FETCH_SREG_ZX_U64': '__sreg64',
        'IEM_MC_FETCH_SREG_BASE_U64': '__sbase64',
        'IEM_MC_FETCH_SREG_BASE_U32': '__sbase32',
        'IEM_MC_STORE_SREG_BASE_U64': '__sbase64',
        'IEM_MC_STORE_SREG_BASE_U32': '__sbase32',

        # General purpose register references.
        'IEM_MC_REF_GREG_U8': '__greg8',
        'IEM_MC_REF_GREG_U16': '__greg16',
        'IEM_MC_REF_GREG_U32': '__greg32',
        'IEM_MC_REF_GREG_U64': '__greg64',
        'IEM_MC_REF_GREG_U8_CONST': '__greg8',
        'IEM_MC_REF_GREG_U16_CONST': '__greg16',
        'IEM_MC_REF_GREG_U32_CONST': '__greg32',
        'IEM_MC_REF_GREG_U64_CONST': '__greg64',
        'IEM_MC_REF_GREG_I32': '__greg32',
        'IEM_MC_REF_GREG_I64': '__greg64',
        'IEM_MC_REF_GREG_I32_CONST': '__greg32',
        'IEM_MC_REF_GREG_I64_CONST': '__greg64',

        # FPU registers.
        'IEM_MC_STORE_FPUREG_R80_SRC_REF': '__fpu',
        'IEM_MC_REF_FPUREG': '__fpu',

        # MMX registers.
        'IEM_MC_FETCH_MREG_U64': '__mreg64',
        'IEM_MC_FETCH_MREG_U32': '__mreg32',
        'IEM_MC_STORE_MREG_U64': '__mreg64',
        'IEM_MC_STORE_MREG_U32_ZX_U64': '__mreg32zx64',
        'IEM_MC_REF_MREG_U64': '__mreg64',
        'IEM_MC_REF_MREG_U64_CONST': '__mreg64',
        'IEM_MC_REF_MREG_U32_CONST': '__mreg32',

        # SSE (XMM) registers.
        'IEM_MC_CLEAR_XREG_U32_MASK': '__xreg32x4',
        'IEM_MC_FETCH_XREG_U128': '__xreg128',
        'IEM_MC_FETCH_XREG_XMM': '__xreg128',
        'IEM_MC_FETCH_XREG_U64': '__xreg64',
        'IEM_MC_FETCH_XREG_U32': '__xreg32',
        'IEM_MC_FETCH_XREG_U16': '__xreg16',
        'IEM_MC_FETCH_XREG_U8': '__xreg8',
        'IEM_MC_FETCH_XREG_PAIR_U128': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_XMM': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',

        'IEM_MC_STORE_XREG_U32_U128': '__xreg32',
        'IEM_MC_STORE_XREG_U128': '__xreg128',
        'IEM_MC_STORE_XREG_XMM': '__xreg128',
        'IEM_MC_STORE_XREG_XMM_U32': '__xreg32',
        'IEM_MC_STORE_XREG_XMM_U64': '__xreg64',
        'IEM_MC_STORE_XREG_U64': '__xreg64',
        'IEM_MC_STORE_XREG_U64_ZX_U128': '__xreg64zx128',
        'IEM_MC_STORE_XREG_U32': '__xreg32',
        'IEM_MC_STORE_XREG_U16': '__xreg16',
        'IEM_MC_STORE_XREG_U8': '__xreg8',
        'IEM_MC_STORE_XREG_U32_ZX_U128': '__xreg32zx128',
        'IEM_MC_STORE_XREG_HI_U64': '__xreg64hi',
        'IEM_MC_STORE_XREG_R32': '__xreg32',
        'IEM_MC_STORE_XREG_R64': '__xreg64',
        'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX': '__xreg8zx',
        'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX': '__xreg16zx',
        'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX': '__xreg32zx',
        'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX': '__xreg64zx',
        'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX': '__xreg128zx',
        'IEM_MC_REF_XREG_U128': '__xreg128',
        'IEM_MC_REF_XREG_U128_CONST': '__xreg128',
        'IEM_MC_REF_XREG_U32_CONST': '__xreg32',
        'IEM_MC_REF_XREG_U64_CONST': '__xreg64',
        'IEM_MC_REF_XREG_R32_CONST': '__xreg32',
        'IEM_MC_REF_XREG_R64_CONST': '__xreg64',
        'IEM_MC_REF_XREG_XMM_CONST': '__xreg128',
        'IEM_MC_COPY_XREG_U128': '__xreg128',

        # AVX (YMM) registers.
        'IEM_MC_FETCH_YREG_U256': '__yreg256',
        'IEM_MC_FETCH_YREG_U128': '__yreg128',
        'IEM_MC_FETCH_YREG_U64': '__yreg64',
        'IEM_MC_FETCH_YREG_2ND_U64': '__yreg64',
        'IEM_MC_FETCH_YREG_U32': '__yreg32',
        'IEM_MC_STORE_YREG_U128': '__yreg128',
        'IEM_MC_STORE_YREG_U32_ZX_VLMAX': '__yreg32zx',
        'IEM_MC_STORE_YREG_U64_ZX_VLMAX': '__yreg64zx',
        'IEM_MC_STORE_YREG_U128_ZX_VLMAX': '__yreg128zx',
        'IEM_MC_STORE_YREG_U256_ZX_VLMAX': '__yreg256zx',
        'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX': '__yreg8',
        'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX': '__yreg16',
        'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX': '__yreg32',
        'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX': '__yreg64',
        'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX': '__yreg128',
        'IEM_MC_REF_YREG_U128': '__yreg128',
        'IEM_MC_REF_YREG_U128_CONST': '__yreg128',
        'IEM_MC_REF_YREG_U64_CONST': '__yreg64',
        'IEM_MC_COPY_YREG_U256_ZX_VLMAX': '__yreg256zx',
        'IEM_MC_COPY_YREG_U128_ZX_VLMAX': '__yreg128zx',
        'IEM_MC_COPY_YREG_U64_ZX_VLMAX': '__yreg64zx',
        'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX': '__yreg3296',
        'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX': '__yreg6464',
        'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX': '__yreg64hi64hi',
        'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX': '__yreg64lo64lo',
        'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
        'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
    };
1836 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1837 """
1838 Scans the statements and variation lists for clues about the threaded function,
1839 and sets self.sSubName if successfull.
1840 """
1841 dHits = {};
1842 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1843 if cHits > 0:
1844 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1845 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1846 else:
1847 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1848 if not cHits:
1849 return;
1850 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1851 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1852 self.sSubName = sName;
1853 return;
1854
1855 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1856 """ Scans the statements for MC variables and call arguments. """
1857 for oStmt in aoStmts:
1858 if isinstance(oStmt, iai.McStmtVar):
1859 if oStmt.sVarName in self.dVariables:
1860 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1861 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1862
1863 # There shouldn't be any variables or arguments declared inside if/
1864 # else blocks, but scan them too to be on the safe side.
1865 if isinstance(oStmt, iai.McStmtCond):
1866 #cBefore = len(self.dVariables);
1867 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1868 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1869 #if len(self.dVariables) != cBefore:
1870 # raise Exception('Variables/arguments defined in conditional branches!');
1871 return True;
1872
    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
        """
        Analyzes the code looking clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.

        dEflStmts is filled in with EFLAGS-touching statements (keyed by MC
        name) for the caller to check against @opflmodify and friends.

        This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.

        Returns annotation on return style.
        """
        sAnnotation = None;
        for oStmt in aoStmts:
            # Set IEM_IMPL_C_F_BRANCH if we see any branching MCs.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                # A relative jump inside an if/else branch is a conditional branch.
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Check for return statements.  At most one finish style is
            # expected per statement list (hence the asserts).
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_Advance;
            elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                                 'IEM_MC_REL_JMP_S32_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_RelJmp;
            elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                 'IEM_MC_SET_RIP_U64_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_SetJmp;
            elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_DeferToCImpl;

            # Collect MCs working on EFLAGS.  Caller will check this.
            if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS',
                               'IEM_MC_ARG_LOCAL_EFLAGS', ):
                dEflStmts[oStmt.sName] = oStmt;
            elif isinstance(oStmt, iai.McStmtCall):
                # CIMPL calls only count when their flags say they touch (status) flags.
                if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
                                   'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
                    if (   oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
                        or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
                        dEflStmts[oStmt.sName] = oStmt;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
                if oStmt.aoElseBranch:
                    oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);

        return sAnnotation;
1945
    def analyze(self, oGenerator):
        """
        Analyzes the code, identifying the number of parameters it requires and such.

        Returns dummy True - raises exception on trouble.
        """

        #
        # Decode the block into a list/tree of McStmt objects.
        #
        aoStmts = self.oMcBlock.decode();

        #
        # Check the block for errors before we proceed (will decode it).
        #
        asErrors = self.oMcBlock.check();
        if asErrors:
            raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
                                       for sError in asErrors]));

        #
        # Scan the statements for local variables and call arguments (self.dVariables).
        #
        self.analyzeFindVariablesAndCallArgs(aoStmts);

        #
        # Scan the code for IEM_CIMPL_F_ and other clues.
        #
        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
        dEflStmts = {};
        self.analyzeCodeOperation(aoStmts, dEflStmts);
        # The bool additions count how many of the three call kinds were seen;
        # more than one in the same block is an error.
        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
            self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls', oGenerator);

        #
        # Analyse EFLAGS related MCs and @opflmodify and friends.
        #
        if dEflStmts:
            oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
            if (   oInstruction is None
                or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
                sMcNames = '+'.join(dEflStmts.keys());
                if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
                    self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
            elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts:
                if not oInstruction.asFlModify:
                    if oInstruction.sMnemonic not in [ 'not', ]:
                        self.error('Uses IEM_MC_COMMIT_EFLAGS but has no flags in @opflmodify!', oGenerator);
            elif (   'IEM_MC_CALL_CIMPL_0' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
                if not oInstruction.asFlModify:
                    self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
                               'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
            elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
                if not oInstruction.asFlTest:
                    if oInstruction.sMnemonic not in [ 'not', ]:
                        self.error('Expected @opfltest!', oGenerator);
            # Cross-check that @opflset / @opflclear entries also appear in @opflmodify.
            if oInstruction and oInstruction.asFlSet:
                for sFlag in oInstruction.asFlSet:
                    if sFlag not in oInstruction.asFlModify:
                        self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
                                   % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
            if oInstruction and oInstruction.asFlClear:
                for sFlag in oInstruction.asFlClear:
                    if sFlag not in oInstruction.asFlModify:
                        self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
                                   % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);

        #
        # Create variations as needed.
        #
        # Pure defer-to-CImpl blocks get only the default variation; blocks
        # with effective-address calculation or raw memory accesses get the
        # addressing-mode variations; everything else the plain mode variations.
        if iai.McStmt.findStmtByNames(aoStmts,
                                      { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
            asVariations = (ThreadedFunctionVariation.ksVariation_Default,);

        elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
                                                   'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
                                                   'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
                                                   'IEM_MC_FETCH_MEM_U32' : True,
                                                   'IEM_MC_FETCH_MEM_U64' : True,
                                                   'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
                                                   'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
                                                   'IEM_MC_STORE_MEM_U32' : True,
                                                   'IEM_MC_STORE_MEM_U64' : True, }):
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
        else:
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;

        # Conditional relative branches get each variation split into a
        # jump-taken and a no-jump flavour.
        if (    'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
            and 'IEM_CIMPL_F_BRANCH_RELATIVE'    in self.dsCImplFlags): # (latter to avoid iemOp_into)
            assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
                   '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
            asVariationsBase = asVariations;
            asVariations = [];
            for sVariation in asVariationsBase:
                asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
            assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);

        # Drop the EFLAGS-checking-and-clearing variations for blocks that
        # never advance/set RIP (no *_AND_FINISH statement).
        if not iai.McStmt.findStmtByNames(aoStmts,
                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U16_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U64_AND_FINISH': True,
                                            }):
            asVariations = [sVariation for sVariation in asVariations
                            if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];

        self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];

        # Dictionary variant of the list.
        self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };

        #
        # Try annotate the threaded function name.
        #
        self.analyzeAndAnnotateName(aoStmts);

        #
        # Continue the analysis on each variation.
        #
        for oVariation in self.aoVariations:
            oVariation.analyzeVariation(aoStmts);

        return True;
2104
    ## Used by emitThreadedCallStmts.
    ## Variations that bake in assumptions about the effective address size or
    ## segment (Addr32/Addr16/FsGs/Flat), so the emitted switch value must also
    ## fold in the actual address-size and segment prefix state (the 8 and 16
    ## bits OR'ed in by emitThreadedCallStmts).
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs: True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
        ThreadedFunctionVariation.ksVariation_32_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat: True,
        ThreadedFunctionVariation.ksVariation_32f_Flat: True,
        ThreadedFunctionVariation.ksVariation_16_Addr32: True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
2118
2119 def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
2120 """
2121 Worker for morphInputCode that returns a list of statements that emits
2122 the call to the threaded functions for the block.
2123
2124 The sBranch parameter is used with conditional branches where we'll emit
2125 different threaded calls depending on whether we're in the jump-taken or
2126 no-jump code path.
2127 """
2128 # Special case for only default variation:
2129 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
2130 assert not sBranch;
2131 return self.aoVariations[0].emitThreadedCallStmts(0);
2132
2133 #
2134 # Case statement sub-class.
2135 #
2136 dByVari = self.dVariations;
2137 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
2138 class Case:
2139 def __init__(self, sCond, sVarNm = None):
2140 self.sCond = sCond;
2141 self.sVarNm = sVarNm;
2142 self.oVar = dByVari[sVarNm] if sVarNm else None;
2143 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
2144
2145 def toCode(self):
2146 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
2147 if self.aoBody:
2148 aoStmts.extend(self.aoBody);
2149 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
2150 return aoStmts;
2151
2152 def toFunctionAssignment(self):
2153 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
2154 if self.aoBody:
2155 aoStmts.extend([
2156 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
2157 iai.McCppGeneric('break;', cchIndent = 8),
2158 ]);
2159 return aoStmts;
2160
2161 def isSame(self, oThat):
2162 if not self.aoBody: # fall thru always matches.
2163 return True;
2164 if len(self.aoBody) != len(oThat.aoBody):
2165 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
2166 return False;
2167 for iStmt, oStmt in enumerate(self.aoBody):
2168 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
2169 assert isinstance(oStmt, iai.McCppGeneric);
2170 assert not isinstance(oStmt, iai.McStmtCond);
2171 if isinstance(oStmt, iai.McStmtCond):
2172 return False;
2173 if oStmt.sName != oThatStmt.sName:
2174 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
2175 return False;
2176 if len(oStmt.asParams) != len(oThatStmt.asParams):
2177 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
2178 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
2179 return False;
2180 for iParam, sParam in enumerate(oStmt.asParams):
2181 if ( sParam != oThatStmt.asParams[iParam]
2182 and ( iParam != 1
2183 or not isinstance(oStmt, iai.McCppCall)
2184 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
2185 or sParam != self.oVar.getIndexName()
2186 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
2187 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
2188 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
2189 return False;
2190 return True;
2191
2192 #
2193 # Determine what we're switch on.
2194 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
2195 #
2196 fSimple = True;
2197 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
2198 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
2199 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
2200 # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
2201 # is not writable in 32-bit mode (at least), thus the penalty mode
2202 # for any accesses via it (simpler this way).)
2203 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
2204 fSimple = False; # threaded functions.
2205 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
2206 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
2207 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
2208
2209 #
2210 # Generate the case statements.
2211 #
2212 # pylintx: disable=x
2213 aoCases = [];
2214 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
2215 assert not fSimple and not sBranch;
2216 aoCases.extend([
2217 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
2218 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
2219 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
2220 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
2221 ]);
2222 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
2223 aoCases.extend([
2224 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
2225 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
2226 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
2227 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
2228 ]);
2229 elif ThrdFnVar.ksVariation_64 in dByVari:
2230 assert fSimple and not sBranch;
2231 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
2232 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
2233 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
2234 elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
2235 assert fSimple and sBranch;
2236 aoCases.append(Case('IEMMODE_64BIT',
2237 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
2238 if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
2239 aoCases.append(Case('IEMMODE_64BIT | 32',
2240 ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
2241
2242 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
2243 assert not fSimple and not sBranch;
2244 aoCases.extend([
2245 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
2246 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
2247 Case('IEMMODE_32BIT | 16', None), # fall thru
2248 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2249 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
2250 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
2251 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
2252 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
2253 ]);
2254 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
2255 aoCases.extend([
2256 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
2257 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
2258 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
2259 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2260 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
2261 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
2262 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
2263 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
2264 ]);
2265 elif ThrdFnVar.ksVariation_32 in dByVari:
2266 assert fSimple and not sBranch;
2267 aoCases.extend([
2268 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2269 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2270 ]);
2271 if ThrdFnVar.ksVariation_32f in dByVari:
2272 aoCases.extend([
2273 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2274 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2275 ]);
2276 elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
2277 assert fSimple and sBranch;
2278 aoCases.extend([
2279 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2280 Case('IEMMODE_32BIT',
2281 ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
2282 ]);
2283 if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
2284 aoCases.extend([
2285 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2286 Case('IEMMODE_32BIT | 32',
2287 ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
2288 ]);
2289
2290 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
2291 assert not fSimple and not sBranch;
2292 aoCases.extend([
2293 Case('IEMMODE_16BIT | 16', None), # fall thru
2294 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
2295 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
2296 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
2297 ]);
2298 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
2299 aoCases.extend([
2300 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
2301 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
2302 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
2303 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
2304 ]);
2305 elif ThrdFnVar.ksVariation_16 in dByVari:
2306 assert fSimple and not sBranch;
2307 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
2308 if ThrdFnVar.ksVariation_16f in dByVari:
2309 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
2310 elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
2311 assert fSimple and sBranch;
2312 aoCases.append(Case('IEMMODE_16BIT',
2313 ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
2314 if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
2315 aoCases.append(Case('IEMMODE_16BIT | 32',
2316 ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));
2317
2318
2319 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
2320 if not fSimple:
2321 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
2322 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
2323 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
2324 if not fSimple:
2325 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
2326 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
2327
2328 if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
2329 assert fSimple and sBranch;
2330 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
2331 ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
2332 else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
2333 if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
2334 assert fSimple and sBranch;
2335 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
2336 ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
2337 else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));
2338
2339 #
2340 # If the case bodies are all the same, except for the function called,
2341 # we can reduce the code size and hopefully compile time.
2342 #
2343 iFirstCaseWithBody = 0;
2344 while not aoCases[iFirstCaseWithBody].aoBody:
2345 iFirstCaseWithBody += 1
2346 fAllSameCases = True
2347 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
2348 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
2349 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
2350 if fAllSameCases:
2351 aoStmts = [
2352 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
2353 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2354 iai.McCppGeneric('{'),
2355 ];
2356 for oCase in aoCases:
2357 aoStmts.extend(oCase.toFunctionAssignment());
2358 aoStmts.extend([
2359 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2360 iai.McCppGeneric('}'),
2361 ]);
2362 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
2363
2364 else:
2365 #
2366 # Generate the generic switch statement.
2367 #
2368 aoStmts = [
2369 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2370 iai.McCppGeneric('{'),
2371 ];
2372 for oCase in aoCases:
2373 aoStmts.extend(oCase.toCode());
2374 aoStmts.extend([
2375 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2376 iai.McCppGeneric('}'),
2377 ]);
2378
2379 return aoStmts;
2380
    def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        Parameters:
            aoStmts           - The statement list to morph; it is not modified.
            fIsConditional    - Whether the block does conditional relative branching,
                                i.e. the call emitted depends on the Jmp/NoJmp path.
            fCallEmitted      - Whether a threaded call has already been emitted
                                further up in this statement (sub)tree.
            cDepth            - Recursion depth; zero for the initial invocation.
            sBranchAnnotation - The g_ksFinishAnnotation_* value for the branch
                                currently being processed, or None.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for iStmt, oStmt in enumerate(aoStmts):
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            # Patch the 4th IEM_MC_BEGIN parameter with the collected C-impl flags.
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or preceed.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    # MC statement: emit the call just before raise/finish/cimpl statements.
                    # The just-copied statement is popped and re-appended after the call.
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        aoDecoderStmts.pop();
                        if not fIsConditional:
                            aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
                        else:
                            assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                    elif iai.g_dMcStmtParsers[oStmt.sName][2]:
                        # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
                        # Which call to emit is decided by the branch annotation rather than the statement.
                        if not sBranchAnnotation:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        assert fIsConditional;
                        aoDecoderStmts.pop();
                        if sBranchAnnotation == g_ksFinishAnnotation_Advance:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
                        elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
                                                              { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
                        else:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                # C++ statement: emit the call right after the done-decoding invocation.
                elif (    not fIsConditional
                      and oStmt.fDecode
                      and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                           or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                    aoDecoderStmts.extend(self.emitThreadedCallStmts());
                    fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
                                                                           fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
                                                                                fCallEmitted, cDepth + 1,
                                                                                oStmt.oElseBranchAnnotation);
                else:
                    fCallEmitted2 = False;
                # Only consider the call emitted if both branches (or the tree above) did it.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
2466
2467
2468 def generateInputCode(self):
2469 """
2470 Modifies the input code.
2471 """
2472 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2473
2474 if len(self.oMcBlock.aoStmts) == 1:
2475 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2476 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2477 if self.dsCImplFlags:
2478 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2479 else:
2480 sCode += '0;\n';
2481 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2482 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2483 sIndent = ' ' * (min(cchIndent, 2) - 2);
2484 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2485 return sCode;
2486
2487 # IEM_MC_BEGIN/END block
2488 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2489 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2490 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2491 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2492 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2493
# Short alias for ThreadedFunctionVariation, used heavily by the case tables
# in ThreadedFunction.emitThreadedCallStmts above (only evaluated at runtime,
# so defining the alias after those methods is fine).
ThrdFnVar = ThreadedFunctionVariation;
2496
2497
2498class IEMThreadedGenerator(object):
2499 """
2500 The threaded code generator & annotator.
2501 """
2502
2503 def __init__(self):
2504 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2505 self.oOptions = None # type: argparse.Namespace
2506 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2507 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
2508 self.cErrors = 0;
2509
2510 #
2511 # Error reporting.
2512 #
2513
2514 def rawError(self, sCompleteMessage):
2515 """ Output a raw error and increment the error counter. """
2516 print(sCompleteMessage, file = sys.stderr);
2517 self.cErrors += 1;
2518 return False;
2519
2520 #
2521 # Processing.
2522 #
2523
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files: parse them, create + analyze the threaded
        functions for all MC blocks, settle name suffixes, and gather
        argument/variable statistics.

        Returns True on success, False if any errors were reported (cErrors);
        raises Exception when variable/argument limits are exceeded.
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        # Also tallies parameter count distributions (raw references vs packed
        # minimum) for the debug statistics printed below.
        dRawParamCounts = {};
        dMinParamCounts = {};
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyze(self);
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Do another pass over the threaded functions to settle the name suffix.
        # If more functions than distinct sub-names share the same decoder
        # function, the names would clash, so append a numeric suffix to each.
        iThreadedFn = 0;
        while iThreadedFn < len(self.aoThreadedFuncs):
            oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
            assert oFunction;
            iThreadedFnNext = iThreadedFn + 1;
            dSubNames = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
            while ( iThreadedFnNext < len(self.aoThreadedFuncs)
                   and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
                dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
                iThreadedFnNext += 1;
            if iThreadedFnNext - iThreadedFn > len(dSubNames):
                iSubName = 0;
                while iThreadedFn + iSubName < iThreadedFnNext:
                    self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
                    iSubName += 1;
            iThreadedFn = iThreadedFnNext;

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        # NOTE(review): also assumes getThreadedFunctionByIndex copes with an
        # index one past the end (non-matching sSrcFile) - confirm.
        iThreadedFunction = 0;
        oThreadedFunction = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.displayStatistics(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs = 0;
        cMaxVars = 0;
        cMaxVarsAndArgs = 0;
        cbMaxArgs = 0;
        cbMaxVars = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.cLocals >= 0:
                # Counts.
                assert oThreadedFunction.oMcBlock.cArgs >= 0;
                cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
                cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
                cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
                # Calc stack allocation size: each variable/argument is rounded
                # up to a whole number of 8-byte (64-bit) stack slots.
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars = max(cbMaxVars, cbVars);
                cbMaxArgs = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        if self.cErrors > 0:
            print('fatal error: %u error%s during processing. Details above.'
                  % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
            return False;
        return True;
2623
2624 #
2625 # Output
2626 #
2627
2628 def generateLicenseHeader(self):
2629 """
2630 Returns the lines for a license header.
2631 """
2632 return [
2633 '/*',
2634 ' * Autogenerated by $Id: IEMAllThrdPython.py 103613 2024-02-29 13:01:56Z vboxsync $ ',
2635 ' * Do not edit!',
2636 ' */',
2637 '',
2638 '/*',
2639 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2640 ' *',
2641 ' * This file is part of VirtualBox base platform packages, as',
2642 ' * available from https://www.virtualbox.org.',
2643 ' *',
2644 ' * This program is free software; you can redistribute it and/or',
2645 ' * modify it under the terms of the GNU General Public License',
2646 ' * as published by the Free Software Foundation, in version 3 of the',
2647 ' * License.',
2648 ' *',
2649 ' * This program is distributed in the hope that it will be useful, but',
2650 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2651 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2652 ' * General Public License for more details.',
2653 ' *',
2654 ' * You should have received a copy of the GNU General Public License',
2655 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2656 ' *',
2657 ' * The contents of this file may alternatively be used under the terms',
2658 ' * of the Common Development and Distribution License Version 1.0',
2659 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2660 ' * in the VirtualBox distribution, in which case the provisions of the',
2661 ' * CDDL are applicable instead of those of the GPL.',
2662 ' *',
2663 ' * You may elect to license modified versions of this file under the',
2664 ' * terms and conditions of either the GPL or the CDDL or both.',
2665 ' *',
2666 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2667 ' */',
2668 '',
2669 '',
2670 '',
2671 ];
2672
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry is a (sFuncNm, cUserArgs, fHaveRecompFunc) tuple; the last
    ## member is consumed by generateNativeFunctionsHeader & friends.
    katBltIns = (
        ( 'Nop', 0, True ),
        ( 'LogCpuState', 0, True ),

        ( 'DeferToCImpl0', 2, True ),
        ( 'CheckIrq', 0, True ),
        ( 'CheckMode', 1, True ),
        ( 'CheckHwInstrBps', 0, False ),
        ( 'CheckCsLim', 1, True ),

        ( 'CheckCsLimAndOpcodes', 3, True ),
        ( 'CheckOpcodes', 3, True ),
        ( 'CheckOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
    );
2709
2710 def generateThreadedFunctionsHeader(self, oOut):
2711 """
2712 Generates the threaded functions header file.
2713 Returns success indicator.
2714 """
2715
2716 asLines = self.generateLicenseHeader();
2717
2718 # Generate the threaded function table indexes.
2719 asLines += [
2720 'typedef enum IEMTHREADEDFUNCS',
2721 '{',
2722 ' kIemThreadedFunc_Invalid = 0,',
2723 '',
2724 ' /*',
2725 ' * Predefined',
2726 ' */',
2727 ];
2728 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2729
2730 iThreadedFunction = 1 + len(self.katBltIns);
2731 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2732 asLines += [
2733 '',
2734 ' /*',
2735 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2736 ' */',
2737 ];
2738 for oThreadedFunction in self.aoThreadedFuncs:
2739 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2740 if oVariation:
2741 iThreadedFunction += 1;
2742 oVariation.iEnumValue = iThreadedFunction;
2743 asLines.append(' ' + oVariation.getIndexName() + ',');
2744 asLines += [
2745 ' kIemThreadedFunc_End',
2746 '} IEMTHREADEDFUNCS;',
2747 '',
2748 ];
2749
2750 # Prototype the function table.
2751 asLines += [
2752 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2753 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2754 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2755 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2756 '#endif',
2757 '#if defined(IN_RING3)',
2758 'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
2759 '#endif',
2760 ];
2761
2762 oOut.write('\n'.join(asLines));
2763 return True;
2764
    ## Maps a parameter bit width to the C mask expression used when unpacking
    ## the value from a packed 64-bit threaded function parameter
    ## (see generateFunctionParameterUnpacking).
    ksBitsToIntMask = {
        1: "UINT64_C(0x1)",
        2: "UINT64_C(0x3)",
        4: "UINT64_C(0xf)",
        8: "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2773
    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
        """
        Outputs code for unpacking parameters.
        This is shared by the threaded and native code generators.

        Parameters:
            oVariation - The ThreadedFunctionVariation whose parameter
                         references (dParamRefs) are to be unpacked.
            oOut       - Output stream the C declarations are written to.
            asParams   - C names of the packed function parameters to
                         unpack from (e.g. kasThreadedParamNames).
        Returns True.
        """
        # Collect one local variable declaration per distinct parameter reference.
        aasVars = [];
        for aoRefs in oVariation.dParamRefs.values():
            oRef = aoRefs[0];
            # Pointer types ('P' prefix) take a whole 64-bit parameter; other
            # types get their width/C-name from the g_kdTypeInfo table.
            if oRef.sType[0] != 'P':
                cBits = g_kdTypeInfo[oRef.sType][0];
                sType = g_kdTypeInfo[oRef.sType][2];
            else:
                cBits = 64;
                sType = oRef.sType;

            sTypeDecl = sType + ' const';

            # Build the unpacking expression: plain copy/cast for full 64-bit
            # values, mask for offset 0, shift+mask otherwise.
            if cBits == 64:
                assert oRef.offNewParam == 0;
                if sType == 'uint64_t':
                    sUnpack = '%s;' % (asParams[oRef.iNewParam],);
                else:
                    sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
            elif oRef.offNewParam == 0:
                sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
            else:
                sUnpack = '(%s)((%s >> %s) & %s);' \
                        % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);

            sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);

            # First column is a param:offset sort key so the output is ordered
            # by packing position.
            aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
                             sTypeDecl, oRef.sNewName, sUnpack, sComment ]);

        # Compute per-column widths so the declarations line up nicely.
        acchVars = [0, 0, 0, 0, 0];
        for asVar in aasVars:
            for iCol, sStr in enumerate(asVar):
                acchVars[iCol] = max(acchVars[iCol], len(sStr));
        sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
        for asVar in sorted(aasVars):
            oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
        return True;
2815
    ## C names of the packed uint64_t parameters of a threaded function.
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut):
        """
        Generates the threaded functions source file.
        Returns success indicator.
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big box comment separating the variations.
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                                 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                    os.path.split(oMcBlock.sSrcFile)[1],
                                    ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        # The four tables (function pointers, argument counts, names and
        # statistics names) are built side by side so entries stay in sync.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' /*Invalid*/ NULL,',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            ' 0, /*Invalid*/',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' "Invalid",',
        ];
        asStatTable = [
            '/**',
            ' * Function statistics name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
            '{',
            ' NULL,',
        ];
        aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);

        # Built-in function entries first.
        for asTable in aasTables:
            asTable.extend((
                '',
                ' /*',
                ' * Predefined.',
                ' */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
            asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
            asStatTable.append(' "BltIn/%s",' % (sFuncNm,));

        # Then the variations; the assertion checks the entries land at the
        # enum values assigned by generateThreadedFunctionsHeader.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    ' /*',
                    ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    ' */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
                    asStatTable.append(' "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.  The name and statistics tables are only compiled
        # in ring-3 / logging builds, hence the #if wrappers.
        #
        oOut.write( '\n'
                  + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write( '\n'
                  + '\n'
                  + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write( '\n'
                  + '\n'
                  + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                  + '\n'
                  + '\n'
                  + '#if defined(IN_RING3)\n');
        oOut.write('\n'.join(asStatTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 */\n');

        return True;
2966
2967 def generateNativeFunctionsHeader(self, oOut):
2968 """
2969 Generates the native recompiler functions header file.
2970 Returns success indicator.
2971 """
2972 if not self.oOptions.fNativeRecompilerEnabled:
2973 return True;
2974
2975 asLines = self.generateLicenseHeader();
2976
2977 # Prototype the function table.
2978 asLines += [
2979 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2980 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
2981 '',
2982 ];
2983
2984 # Emit indicators as to which of the builtin functions have a native
2985 # recompiler function and which not. (We only really need this for
2986 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
2987 for atBltIn in self.katBltIns:
2988 if atBltIn[1]:
2989 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
2990 else:
2991 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
2992
2993 # Emit prototypes for the builtin functions we use in tables.
2994 asLines += [
2995 '',
2996 '/* Prototypes for built-in functions used in the above tables. */',
2997 ];
2998 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2999 if fHaveRecompFunc:
3000 asLines += [
3001 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3002 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3003 ];
3004
3005 oOut.write('\n'.join(asLines));
3006 return True;
3007
    def generateNativeFunctionsSource(self, oOut):
        """
        Generates the native recompiler functions source file.

        For every threaded-function variation that has a recompilable native
        recompiler object, a static IEM_DECL_IEMNATIVERECOMPFUNC_DEF body is
        emitted; afterwards the g_apfnIemNativeRecompileFunctions table is
        written, running parallel to g_apfnIemThreadedFunctions (NULL entries
        for variations without a native recompiler).

        Returns success indicator.
        """
        # The file is only produced when native recompilation is enabled.
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Banner comment separating each variation section in the output.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                # Only variations with a working native recompiler get a body here.
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write(  '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + ' /*Invalid*/ NULL,'
                   + '\n'
                   + ' /*\n'
                   + ' * Predefined.\n'
                   + ' */\n'
                   );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # Entry #0 is 'Invalid' and the built-ins follow it, so the variation
        # entries start at 1 + len(katBltIns); must match the enum values.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write(  ' /*\n'
                       + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + ' */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    # Table position must agree with the enum value assigned earlier.
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getNativeFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write(  '};\n'
                   + '\n');
        return True;
3103
    def generateNativeLivenessSource(self, oOut):
        """
        Generates the native recompiler liveness analysis functions source file.

        Structurally parallel to generateNativeFunctionsSource, except it emits
        IEM_DECL_IEMNATIVELIVENESSFUNC_DEF bodies and the
        g_apfnIemNativeLivenessFunctions table, and the unpacked parameters are
        marked unused via RT_NOREF_PV since liveness analysis doesn't execute them.

        Returns success indicator.
        """
        # The file is only produced when native recompilation is enabled.
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Banner comment separating each variation section in the output.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                # Only variations with a working native recompiler get a body here.
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',));
                    # Reference each unpacked parameter once so the C compiler
                    # doesn't warn about unused variables in the liveness body.
                    asNoRefs = []; #[ 'RT_NOREF_PV(pReNative);', ];
                    for aoRefs in oVariation.dParamRefs.values():
                        asNoRefs.append('RT_NOREF_PV(%s);' % (aoRefs[0].sNewName,));
                    oOut.write(' %s\n' % (' '.join(asNoRefs),));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write(  '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + ' /*Invalid*/ NULL,'
                   + '\n'
                   + ' /*\n'
                   + ' * Predefined.\n'
                   + ' */\n'
                   );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # Entry #0 is 'Invalid' and the built-ins follow it, so the variation
        # entries start at 1 + len(katBltIns); must match the enum values.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write(  ' /*\n'
                       + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + ' */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    # Table position must agree with the enum value assigned earlier.
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getLivenessFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write(  '};\n'
                   + '\n');
        return True;
3203
3204
3205 def getThreadedFunctionByIndex(self, idx):
3206 """
3207 Returns a ThreadedFunction object for the given index. If the index is
3208 out of bounds, a dummy is returned.
3209 """
3210 if idx < len(self.aoThreadedFuncs):
3211 return self.aoThreadedFuncs[idx];
3212 return ThreadedFunction.dummyInstance();
3213
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.

        Copies each selected input file line by line, splicing in the threaded
        replacement code (generateInputCode) wherever an MC block begins/ends;
        idxFile selects which of the partitioned output files (1..4) to emit.

        Returns success indicator.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?
            # (aoInfo[2] == -1 means the file belongs to every set.)
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                # Every input file must be known to the table; -1 here means it wasn't found.
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the file's lines, tracking the next MC block to replace.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine = 0;
            while iLine < len(oParser.asLines):
                sLine = oParser.asLines[iLine];
                iLine += 1; # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block. Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    # Sanity check: the begin line holds exactly one IEM_MC_ macro
                    # (IEM_MC_F_ flags excluded), unless it is a partial macro expansion.
                    assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the block's end line and emit its tail.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    offLine = 0;
                    # Consume every MC block that starts on this same physical line.
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        assert ( sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
3304
3305 def generateModifiedInput1(self, oOut):
3306 """
3307 Generates the combined modified input source/header file, part 1.
3308 Returns success indicator.
3309 """
3310 return self.generateModifiedInput(oOut, 1);
3311
3312 def generateModifiedInput2(self, oOut):
3313 """
3314 Generates the combined modified input source/header file, part 2.
3315 Returns success indicator.
3316 """
3317 return self.generateModifiedInput(oOut, 2);
3318
3319 def generateModifiedInput3(self, oOut):
3320 """
3321 Generates the combined modified input source/header file, part 3.
3322 Returns success indicator.
3323 """
3324 return self.generateModifiedInput(oOut, 3);
3325
3326 def generateModifiedInput4(self, oOut):
3327 """
3328 Generates the combined modified input source/header file, part 4.
3329 Returns success indicator.
3330 """
3331 return self.generateModifiedInput(oOut, 4);
3332
3333
3334 #
3335 # Main
3336 #
3337
3338 def main(self, asArgs):
3339 """
3340 C-like main function.
3341 Returns exit code.
3342 """
3343
3344 #
3345 # Parse arguments
3346 #
3347 sScriptDir = os.path.dirname(__file__);
3348 oParser = argparse.ArgumentParser(add_help = False);
3349 oParser.add_argument('asInFiles',
3350 metavar = 'input.cpp.h',
3351 nargs = '*',
3352 default = [os.path.join(sScriptDir, aoInfo[0])
3353 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3354 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3355 oParser.add_argument('--host-arch',
3356 metavar = 'arch',
3357 dest = 'sHostArch',
3358 action = 'store',
3359 default = None,
3360 help = 'The host architecture.');
3361
3362 oParser.add_argument('--out-thrd-funcs-hdr',
3363 metavar = 'file-thrd-funcs.h',
3364 dest = 'sOutFileThrdFuncsHdr',
3365 action = 'store',
3366 default = '-',
3367 help = 'The output header file for the threaded functions.');
3368 oParser.add_argument('--out-thrd-funcs-cpp',
3369 metavar = 'file-thrd-funcs.cpp',
3370 dest = 'sOutFileThrdFuncsCpp',
3371 action = 'store',
3372 default = '-',
3373 help = 'The output C++ file for the threaded functions.');
3374 oParser.add_argument('--out-n8ve-funcs-hdr',
3375 metavar = 'file-n8tv-funcs.h',
3376 dest = 'sOutFileN8veFuncsHdr',
3377 action = 'store',
3378 default = '-',
3379 help = 'The output header file for the native recompiler functions.');
3380 oParser.add_argument('--out-n8ve-funcs-cpp',
3381 metavar = 'file-n8tv-funcs.cpp',
3382 dest = 'sOutFileN8veFuncsCpp',
3383 action = 'store',
3384 default = '-',
3385 help = 'The output C++ file for the native recompiler functions.');
3386 oParser.add_argument('--out-n8ve-liveness-cpp',
3387 metavar = 'file-n8tv-liveness.cpp',
3388 dest = 'sOutFileN8veLivenessCpp',
3389 action = 'store',
3390 default = '-',
3391 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3392 oParser.add_argument('--native',
3393 dest = 'fNativeRecompilerEnabled',
3394 action = 'store_true',
3395 default = False,
3396 help = 'Enables generating the files related to native recompilation.');
3397 oParser.add_argument('--out-mod-input1',
3398 metavar = 'file-instr.cpp.h',
3399 dest = 'sOutFileModInput1',
3400 action = 'store',
3401 default = '-',
3402 help = 'The output C++/header file for modified input instruction files part 1.');
3403 oParser.add_argument('--out-mod-input2',
3404 metavar = 'file-instr.cpp.h',
3405 dest = 'sOutFileModInput2',
3406 action = 'store',
3407 default = '-',
3408 help = 'The output C++/header file for modified input instruction files part 2.');
3409 oParser.add_argument('--out-mod-input3',
3410 metavar = 'file-instr.cpp.h',
3411 dest = 'sOutFileModInput3',
3412 action = 'store',
3413 default = '-',
3414 help = 'The output C++/header file for modified input instruction files part 3.');
3415 oParser.add_argument('--out-mod-input4',
3416 metavar = 'file-instr.cpp.h',
3417 dest = 'sOutFileModInput4',
3418 action = 'store',
3419 default = '-',
3420 help = 'The output C++/header file for modified input instruction files part 4.');
3421 oParser.add_argument('--help', '-h', '-?',
3422 action = 'help',
3423 help = 'Display help and exit.');
3424 oParser.add_argument('--version', '-V',
3425 action = 'version',
3426 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3427 % (__version__.split()[1], iai.__version__.split()[1],),
3428 help = 'Displays the version/revision of the script and exit.');
3429 self.oOptions = oParser.parse_args(asArgs[1:]);
3430 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3431
3432 #
3433 # Process the instructions specified in the IEM sources.
3434 #
3435 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3436 #
3437 # Generate the output files.
3438 #
3439 aaoOutputFiles = (
3440 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
3441 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
3442 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
3443 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
3444 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource ),
3445 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
3446 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
3447 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
3448 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
3449 );
3450 fRc = True;
3451 for sOutFile, fnGenMethod in aaoOutputFiles:
3452 if sOutFile == '-':
3453 fRc = fnGenMethod(sys.stdout) and fRc;
3454 else:
3455 try:
3456 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3457 except Exception as oXcpt:
3458 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3459 return 1;
3460 fRc = fnGenMethod(oOut) and fRc;
3461 oOut.close();
3462 if fRc:
3463 return 0;
3464
3465 return 1;
3466
3467
if __name__ == '__main__':
    # Script entry point: run the generator and propagate its exit code.
    sys.exit(IEMThreadedGenerator().main(sys.argv));
3470
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette