VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 103158

Last change on this file since 103158 was 102977, checked in by vboxsync, 16 months ago

VMM/IEM: Implemented generic fallback for misaligned x86 locking that is not compatible with the host. Using the existing split-lock solution with VINF_EM_EMULATE_SPLIT_LOCK from bugref:10052. We keep ignoring the 'lock' prefix in the recompiler for single CPU VMs (now also on amd64 hosts). bugref:10547

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 149.0 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 102977 2024-01-19 23:11:30Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 102977 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 compatibility: the 'long' type was merged into 'int'.
if sys.version_info[0] >= 3:
    long = int;  # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the threaded functions.
g_kcThreadedParams = 3;
55
## Basic parameter types: type name -> (cBits, fSigned, C-type).
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type      )
    'int8_t':       (    8, True,  'int8_t',     ),
    'int16_t':      (   16, True,  'int16_t',    ),
    'int32_t':      (   32, True,  'int32_t',    ),
    'int64_t':      (   64, True,  'int64_t',    ),
    'uint4_t':      (    4, False, 'uint8_t',    ),
    'uint8_t':      (    8, False, 'uint8_t',    ),
    'uint16_t':     (   16, False, 'uint16_t',   ),
    'uint32_t':     (   32, False, 'uint32_t',   ),
    'uint64_t':     (   64, False, 'uint64_t',   ),
    'uintptr_t':    (   64, False, 'uintptr_t',  ), # ASSUMES 64-bit host pointer size.
    'bool':         (    1, False, 'bool',       ),
    'IEMMODE':      (    2, False, 'IEMMODE',    ),
};

## Extended type table - only for getTypeBitCount/variables.
## Superset of g_kdTypeInfo (merged below).
g_kdTypeInfo2 = {
    'RTFLOAT32U':        (       32, False, 'RTFLOAT32U',       ),
    'RTFLOAT64U':        (       64, False, 'RTFLOAT64U',       ),
    'RTUINT64U':         (       64, False, 'RTUINT64U',        ),
    'RTGCPTR':           (       64, False, 'RTGCPTR',          ),
    'RTPBCD80U':         (       80, False, 'RTPBCD80U',        ),
    'RTFLOAT80U':        (       80, False, 'RTFLOAT80U',       ),
    'IEMFPURESULT':      (    80+16, False, 'IEMFPURESULT',     ),
    'IEMFPURESULTTWO':   ( 80+16+80, False, 'IEMFPURESULTTWO',  ),
    'RTUINT128U':        (      128, False, 'RTUINT128U',       ),
    'X86XMMREG':         (      128, False, 'X86XMMREG',        ),
    'IEMSSERESULT':      (   128+32, False, 'IEMSSERESULT',     ),
    'IEMMEDIAF2XMMSRC':  (      256, False, 'IEMMEDIAF2XMMSRC', ),
    'RTUINT256U':        (      256, False, 'RTUINT256U',       ),
    'IEMPCMPISTRXSRC':   (      256, False, 'IEMPCMPISTRXSRC',  ),
    'IEMPCMPESTRXSRC':   (      384, False, 'IEMPCMPESTRXSRC',  ),
}; #| g_kdTypeInfo; - requires 3.9
g_kdTypeInfo2.update(g_kdTypeInfo);

def getTypeBitCount(sType):
    """
    Translates a type name to its size in bits.

    Pointer types (containing '*' or using the 'P' prefix convention) are
    assumed to be 64 bits wide.  Unknown types are complained about on stdout
    and treated as 64-bit rather than raising, so generation can continue.
    """
    oInfo = g_kdTypeInfo2.get(sType);
    if oInfo is not None:               # Single lookup instead of 'in' + '[]'.
        return oInfo[0];
    # Guard sType[0] so an empty string falls through to the error path
    # instead of raising IndexError.
    if '*' in sType or (sType and sType[0] == 'P'):
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,));
    return 64;
103
## Maps pVCpu->iem.s fields to their types (1-tuples).
## A None type marks fields that threaded code must never reference.
g_kdIemFieldToType = {
    # Instruction-buffer fields that may not be referenced:
    'offInstrNextByte':  ( None, ),
    'cbInstrBuf':        ( None, ),
    'pbInstrBuf':        ( None, ),
    'uInstrBufPc':       ( None, ),
    'cbInstrBufTotal':   ( None, ),
    'offCurInstrStart':  ( None, ),
    'cbOpcode':          ( None, ),
    'offOpcode':         ( None, ),
    'offModRm':          ( None, ),
    # Fields that are okay to reference, with their C types:
    'fPrefixes':         ( 'uint32_t', ),
    'uRexReg':           ( 'uint8_t', ),
    'uRexB':             ( 'uint8_t', ),
    'uRexIndex':         ( 'uint8_t', ),
    'iEffSeg':           ( 'uint8_t', ),
    'enmEffOpSize':      ( 'IEMMODE', ),
    'enmDefAddrMode':    ( 'IEMMODE', ),
    'enmEffAddrMode':    ( 'IEMMODE', ),
    'enmDefOpSize':      ( 'IEMMODE', ),
    'idxPrefix':         ( 'uint8_t', ),
    'uVex3rdReg':        ( 'uint8_t', ),
    'uVexLength':        ( 'uint8_t', ),
    'fEvexStuff':        ( 'uint8_t', ),
    'uFpuOpcode':        ( 'uint16_t', ),
};
131
## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
## @{
g_ksFinishAnnotation_Advance      = 'Advance';
g_ksFinishAnnotation_RelJmp       = 'RelJmp';
g_ksFinishAnnotation_SetJmp       = 'SetJmp';
g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
## @}
139
140
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.

    Records where a value is referenced in the original decoder code and, once
    packing has been decided, where it lives in the threaded function's
    generic parameters.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference in the original code.
        self.sOrgRef    = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        self.sStdRef    = sStdRef if sStdRef else ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter.
        self.fCustomRef = sStdRef is not None;
        ## The type (typically derived).
        self.sType      = sType;
        ## The statement making the reference.
        self.oStmt      = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam     = iParam;
        ## The offset in the parameter of the reference.
        self.offParam   = offParam;

        ## The variable name in the threaded function.
        self.sNewName    = 'x';
        ## The threaded function parameter this reference is packed into.
        ## (Fixed comment - original read 'The this is packed into.')
        self.iNewParam   = 99;
        ## The bit offset in iNewParam.
        self.offNewParam = 1024;    # Fixed: missing ';' (file convention).
169
170class ThreadedFunctionVariation(object):
171 """ Threaded function variation. """
172
173 ## @name Variations.
174 ## These variations will match translation block selection/distinctions as well.
175 ## @{
176 # pylint: disable=line-too-long
177 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
178 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
179 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
180 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
181 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
182 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
183 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
184 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
185 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
186 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
187 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
188 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
189 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
190 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
191 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
192 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
193 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
194 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
195 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
196 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
197 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
198 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide.
199 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide, eflags.
200 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
201 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
202 ksVariation_64 = '_64'; ##< 64-bit mode code.
203 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
204 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
205 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
206 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
207 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
208 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
209 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
210 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
211 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
212 # pylint: enable=line-too-long
213 kasVariations = (
214 ksVariation_Default,
215 ksVariation_16,
216 ksVariation_16f,
217 ksVariation_16_Jmp,
218 ksVariation_16f_Jmp,
219 ksVariation_16_NoJmp,
220 ksVariation_16f_NoJmp,
221 ksVariation_16_Addr32,
222 ksVariation_16f_Addr32,
223 ksVariation_16_Pre386,
224 ksVariation_16f_Pre386,
225 ksVariation_16_Pre386_Jmp,
226 ksVariation_16f_Pre386_Jmp,
227 ksVariation_16_Pre386_NoJmp,
228 ksVariation_16f_Pre386_NoJmp,
229 ksVariation_32,
230 ksVariation_32f,
231 ksVariation_32_Jmp,
232 ksVariation_32f_Jmp,
233 ksVariation_32_NoJmp,
234 ksVariation_32f_NoJmp,
235 ksVariation_32_Flat,
236 ksVariation_32f_Flat,
237 ksVariation_32_Addr16,
238 ksVariation_32f_Addr16,
239 ksVariation_64,
240 ksVariation_64f,
241 ksVariation_64_Jmp,
242 ksVariation_64f_Jmp,
243 ksVariation_64_NoJmp,
244 ksVariation_64f_NoJmp,
245 ksVariation_64_FsGs,
246 ksVariation_64f_FsGs,
247 ksVariation_64_Addr32,
248 ksVariation_64f_Addr32,
249 );
250 kasVariationsWithoutAddress = (
251 ksVariation_16,
252 ksVariation_16f,
253 ksVariation_16_Pre386,
254 ksVariation_16f_Pre386,
255 ksVariation_32,
256 ksVariation_32f,
257 ksVariation_64,
258 ksVariation_64f,
259 );
260 kasVariationsWithoutAddressNot286 = (
261 ksVariation_16,
262 ksVariation_16f,
263 ksVariation_32,
264 ksVariation_32f,
265 ksVariation_64,
266 ksVariation_64f,
267 );
268 kasVariationsWithoutAddressNot286Not64 = (
269 ksVariation_16,
270 ksVariation_16f,
271 ksVariation_32,
272 ksVariation_32f,
273 );
274 kasVariationsWithoutAddressNot64 = (
275 ksVariation_16,
276 ksVariation_16f,
277 ksVariation_16_Pre386,
278 ksVariation_16f_Pre386,
279 ksVariation_32,
280 ksVariation_32f,
281 );
282 kasVariationsWithoutAddressOnly64 = (
283 ksVariation_64,
284 ksVariation_64f,
285 );
286 kasVariationsWithAddress = (
287 ksVariation_16,
288 ksVariation_16f,
289 ksVariation_16_Addr32,
290 ksVariation_16f_Addr32,
291 ksVariation_16_Pre386,
292 ksVariation_16f_Pre386,
293 ksVariation_32,
294 ksVariation_32f,
295 ksVariation_32_Flat,
296 ksVariation_32f_Flat,
297 ksVariation_32_Addr16,
298 ksVariation_32f_Addr16,
299 ksVariation_64,
300 ksVariation_64f,
301 ksVariation_64_FsGs,
302 ksVariation_64f_FsGs,
303 ksVariation_64_Addr32,
304 ksVariation_64f_Addr32,
305 );
306 kasVariationsWithAddressNot286 = (
307 ksVariation_16,
308 ksVariation_16f,
309 ksVariation_16_Addr32,
310 ksVariation_16f_Addr32,
311 ksVariation_32,
312 ksVariation_32f,
313 ksVariation_32_Flat,
314 ksVariation_32f_Flat,
315 ksVariation_32_Addr16,
316 ksVariation_32f_Addr16,
317 ksVariation_64,
318 ksVariation_64f,
319 ksVariation_64_FsGs,
320 ksVariation_64f_FsGs,
321 ksVariation_64_Addr32,
322 ksVariation_64f_Addr32,
323 );
324 kasVariationsWithAddressNot286Not64 = (
325 ksVariation_16,
326 ksVariation_16f,
327 ksVariation_16_Addr32,
328 ksVariation_16f_Addr32,
329 ksVariation_32,
330 ksVariation_32f,
331 ksVariation_32_Flat,
332 ksVariation_32f_Flat,
333 ksVariation_32_Addr16,
334 ksVariation_32f_Addr16,
335 );
336 kasVariationsWithAddressNot64 = (
337 ksVariation_16,
338 ksVariation_16f,
339 ksVariation_16_Addr32,
340 ksVariation_16f_Addr32,
341 ksVariation_16_Pre386,
342 ksVariation_16f_Pre386,
343 ksVariation_32,
344 ksVariation_32f,
345 ksVariation_32_Flat,
346 ksVariation_32f_Flat,
347 ksVariation_32_Addr16,
348 ksVariation_32f_Addr16,
349 );
350 kasVariationsWithAddressOnly64 = (
351 ksVariation_64,
352 ksVariation_64f,
353 ksVariation_64_FsGs,
354 ksVariation_64f_FsGs,
355 ksVariation_64_Addr32,
356 ksVariation_64f_Addr32,
357 );
358 kasVariationsOnlyPre386 = (
359 ksVariation_16_Pre386,
360 ksVariation_16f_Pre386,
361 );
362 kasVariationsEmitOrder = (
363 ksVariation_Default,
364 ksVariation_64,
365 ksVariation_64f,
366 ksVariation_64_Jmp,
367 ksVariation_64f_Jmp,
368 ksVariation_64_NoJmp,
369 ksVariation_64f_NoJmp,
370 ksVariation_64_FsGs,
371 ksVariation_64f_FsGs,
372 ksVariation_32_Flat,
373 ksVariation_32f_Flat,
374 ksVariation_32,
375 ksVariation_32f,
376 ksVariation_32_Jmp,
377 ksVariation_32f_Jmp,
378 ksVariation_32_NoJmp,
379 ksVariation_32f_NoJmp,
380 ksVariation_16,
381 ksVariation_16f,
382 ksVariation_16_Jmp,
383 ksVariation_16f_Jmp,
384 ksVariation_16_NoJmp,
385 ksVariation_16f_NoJmp,
386 ksVariation_16_Addr32,
387 ksVariation_16f_Addr32,
388 ksVariation_16_Pre386,
389 ksVariation_16f_Pre386,
390 ksVariation_16_Pre386_Jmp,
391 ksVariation_16f_Pre386_Jmp,
392 ksVariation_16_Pre386_NoJmp,
393 ksVariation_16f_Pre386_NoJmp,
394 ksVariation_32_Addr16,
395 ksVariation_32f_Addr16,
396 ksVariation_64_Addr32,
397 ksVariation_64f_Addr32,
398 );
399 kdVariationNames = {
400 ksVariation_Default: 'defer-to-cimpl',
401 ksVariation_16: '16-bit',
402 ksVariation_16f: '16-bit w/ eflag checking and clearing',
403 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
404 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
405 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
406 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
407 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
408 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
409 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
410 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
411 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
412 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
413 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump taken',
414 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
415 ksVariation_32: '32-bit',
416 ksVariation_32f: '32-bit w/ eflag checking and clearing',
417 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
418 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
419 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
420 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
421 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
422 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
423 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
424 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
425 ksVariation_64: '64-bit',
426 ksVariation_64f: '64-bit w/ eflag checking and clearing',
427 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
428 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
429 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
430 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
431 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
432 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
433 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
434 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
435 };
436 kdVariationsWithEflagsCheckingAndClearing = {
437 ksVariation_16f: True,
438 ksVariation_16f_Jmp: True,
439 ksVariation_16f_NoJmp: True,
440 ksVariation_16f_Addr32: True,
441 ksVariation_16f_Pre386: True,
442 ksVariation_16f_Pre386_Jmp: True,
443 ksVariation_16f_Pre386_NoJmp: True,
444 ksVariation_32f: True,
445 ksVariation_32f_Jmp: True,
446 ksVariation_32f_NoJmp: True,
447 ksVariation_32f_Flat: True,
448 ksVariation_32f_Addr16: True,
449 ksVariation_64f: True,
450 ksVariation_64f_Jmp: True,
451 ksVariation_64f_NoJmp: True,
452 ksVariation_64f_FsGs: True,
453 ksVariation_64f_Addr32: True,
454 };
455 kdVariationsOnly64NoFlags = {
456 ksVariation_64: True,
457 ksVariation_64_Jmp: True,
458 ksVariation_64_NoJmp: True,
459 ksVariation_64_FsGs: True,
460 ksVariation_64_Addr32: True,
461 };
462 kdVariationsOnly64WithFlags = {
463 ksVariation_64f: True,
464 ksVariation_64f_Jmp: True,
465 ksVariation_64f_NoJmp: True,
466 ksVariation_64f_FsGs: True,
467 ksVariation_64f_Addr32: True,
468 };
469 kdVariationsOnlyPre386NoFlags = {
470 ksVariation_16_Pre386: True,
471 ksVariation_16_Pre386_Jmp: True,
472 ksVariation_16_Pre386_NoJmp: True,
473 };
474 kdVariationsOnlyPre386WithFlags = {
475 ksVariation_16f_Pre386: True,
476 ksVariation_16f_Pre386_Jmp: True,
477 ksVariation_16f_Pre386_NoJmp: True,
478 };
479 kdVariationsWithFlatAddress = {
480 ksVariation_32_Flat: True,
481 ksVariation_32f_Flat: True,
482 ksVariation_64: True,
483 ksVariation_64f: True,
484 ksVariation_64_Addr32: True,
485 ksVariation_64f_Addr32: True,
486 };
487 kdVariationsWithFlatStackAddress = {
488 ksVariation_32_Flat: True,
489 ksVariation_32f_Flat: True,
490 ksVariation_64: True,
491 ksVariation_64f: True,
492 ksVariation_64_FsGs: True,
493 ksVariation_64f_FsGs: True,
494 ksVariation_64_Addr32: True,
495 ksVariation_64f_Addr32: True,
496 };
497 kdVariationsWithFlat64StackAddress = {
498 ksVariation_64: True,
499 ksVariation_64f: True,
500 ksVariation_64_FsGs: True,
501 ksVariation_64f_FsGs: True,
502 ksVariation_64_Addr32: True,
503 ksVariation_64f_Addr32: True,
504 };
505 kdVariationsWithFlatAddr16 = {
506 ksVariation_16: True,
507 ksVariation_16f: True,
508 ksVariation_16_Pre386: True,
509 ksVariation_16f_Pre386: True,
510 ksVariation_32_Addr16: True,
511 ksVariation_32f_Addr16: True,
512 };
513 kdVariationsWithFlatAddr32No64 = {
514 ksVariation_16_Addr32: True,
515 ksVariation_16f_Addr32: True,
516 ksVariation_32: True,
517 ksVariation_32f: True,
518 ksVariation_32_Flat: True,
519 ksVariation_32f_Flat: True,
520 };
521 kdVariationsWithAddressOnly64 = {
522 ksVariation_64: True,
523 ksVariation_64f: True,
524 ksVariation_64_FsGs: True,
525 ksVariation_64f_FsGs: True,
526 ksVariation_64_Addr32: True,
527 ksVariation_64f_Addr32: True,
528 };
529 kdVariationsWithConditional = {
530 ksVariation_16_Jmp: True,
531 ksVariation_16_NoJmp: True,
532 ksVariation_16_Pre386_Jmp: True,
533 ksVariation_16_Pre386_NoJmp: True,
534 ksVariation_32_Jmp: True,
535 ksVariation_32_NoJmp: True,
536 ksVariation_64_Jmp: True,
537 ksVariation_64_NoJmp: True,
538 ksVariation_16f_Jmp: True,
539 ksVariation_16f_NoJmp: True,
540 ksVariation_16f_Pre386_Jmp: True,
541 ksVariation_16f_Pre386_NoJmp: True,
542 ksVariation_32f_Jmp: True,
543 ksVariation_32f_NoJmp: True,
544 ksVariation_64f_Jmp: True,
545 ksVariation_64f_NoJmp: True,
546 };
547 kdVariationsWithConditionalNoJmp = {
548 ksVariation_16_NoJmp: True,
549 ksVariation_16_Pre386_NoJmp: True,
550 ksVariation_32_NoJmp: True,
551 ksVariation_64_NoJmp: True,
552 ksVariation_16f_NoJmp: True,
553 ksVariation_16f_Pre386_NoJmp: True,
554 ksVariation_32f_NoJmp: True,
555 ksVariation_64f_NoJmp: True,
556 };
557 kdVariationsOnlyPre386 = {
558 ksVariation_16_Pre386: True,
559 ksVariation_16f_Pre386: True,
560 ksVariation_16_Pre386_Jmp: True,
561 ksVariation_16f_Pre386_Jmp: True,
562 ksVariation_16_Pre386_NoJmp: True,
563 ksVariation_16f_Pre386_NoJmp: True,
564 };
565 ## @}
566
567 ## IEM_CIMPL_F_XXX flags that we know.
568 ## The value indicates whether it terminates the TB or not. The goal is to
569 ## improve the recompiler so all but END_TB will be False.
570 ##
571 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
572 kdCImplFlags = {
573 'IEM_CIMPL_F_MODE': False,
574 'IEM_CIMPL_F_BRANCH_DIRECT': False,
575 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
576 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
577 'IEM_CIMPL_F_BRANCH_FAR': True,
578 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
579 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
580 'IEM_CIMPL_F_BRANCH_STACK': False,
581 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
582 'IEM_CIMPL_F_RFLAGS': False,
583 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
584 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
585 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
586 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
587 'IEM_CIMPL_F_STATUS_FLAGS': False,
588 'IEM_CIMPL_F_VMEXIT': False,
589 'IEM_CIMPL_F_FPU': False,
590 'IEM_CIMPL_F_REP': False,
591 'IEM_CIMPL_F_IO': False,
592 'IEM_CIMPL_F_END_TB': True,
593 'IEM_CIMPL_F_XCPT': True,
594 'IEM_CIMPL_F_CALLS_CIMPL': False,
595 'IEM_CIMPL_F_CALLS_AIMPL': False,
596 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
597 };
598
599 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
600 self.oParent = oThreadedFunction # type: ThreadedFunction
601 ##< ksVariation_Xxxx.
602 self.sVariation = sVariation
603
604 ## Threaded function parameter references.
605 self.aoParamRefs = [] # type: List[ThreadedParamRef]
606 ## Unique parameter references.
607 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
608 ## Minimum number of parameters to the threaded function.
609 self.cMinParams = 0;
610
611 ## List/tree of statements for the threaded function.
612 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
613
614 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
615 self.iEnumValue = -1;
616
617 ## Native recompilation details for this variation.
618 self.oNativeRecomp = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 if self.oParent.oMcBlock.iInFunction == 0:
625 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
626 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
627
628 def getThreadedFunctionName(self):
629 sName = self.oParent.oMcBlock.sFunction;
630 if sName.startswith('iemOp_'):
631 sName = sName[len('iemOp_'):];
632 if self.oParent.oMcBlock.iInFunction == 0:
633 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
634 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
635
636 def getNativeFunctionName(self):
637 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
638
639 def getShortName(self):
640 sName = self.oParent.oMcBlock.sFunction;
641 if sName.startswith('iemOp_'):
642 sName = sName[len('iemOp_'):];
643 if self.oParent.oMcBlock.iInFunction == 0:
644 return '%s%s' % ( sName, self.sVariation, );
645 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
646
647 def isWithFlagsCheckingAndClearingVariation(self):
648 """
649 Checks if this is a variation that checks and clears EFLAGS.
650 """
651 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
652
653 #
654 # Analysis and code morphing.
655 #
656
657 def raiseProblem(self, sMessage):
658 """ Raises a problem. """
659 self.oParent.raiseProblem(sMessage);
660
661 def warning(self, sMessage):
662 """ Emits a warning. """
663 self.oParent.warning(sMessage);
664
665 def analyzeReferenceToType(self, sRef):
666 """
667 Translates a variable or structure reference to a type.
668 Returns type name.
669 Raises exception if unable to figure it out.
670 """
671 ch0 = sRef[0];
672 if ch0 == 'u':
673 if sRef.startswith('u32'):
674 return 'uint32_t';
675 if sRef.startswith('u8') or sRef == 'uReg':
676 return 'uint8_t';
677 if sRef.startswith('u64'):
678 return 'uint64_t';
679 if sRef.startswith('u16'):
680 return 'uint16_t';
681 elif ch0 == 'b':
682 return 'uint8_t';
683 elif ch0 == 'f':
684 return 'bool';
685 elif ch0 == 'i':
686 if sRef.startswith('i8'):
687 return 'int8_t';
688 if sRef.startswith('i16'):
689 return 'int16_t';
690 if sRef.startswith('i32'):
691 return 'int32_t';
692 if sRef.startswith('i64'):
693 return 'int64_t';
694 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
695 return 'uint8_t';
696 elif ch0 == 'p':
697 if sRef.find('-') < 0:
698 return 'uintptr_t';
699 if sRef.startswith('pVCpu->iem.s.'):
700 sField = sRef[len('pVCpu->iem.s.') : ];
701 if sField in g_kdIemFieldToType:
702 if g_kdIemFieldToType[sField][0]:
703 return g_kdIemFieldToType[sField][0];
704 elif ch0 == 'G' and sRef.startswith('GCPtr'):
705 return 'uint64_t';
706 elif ch0 == 'e':
707 if sRef == 'enmEffOpSize':
708 return 'IEMMODE';
709 elif ch0 == 'o':
710 if sRef.startswith('off32'):
711 return 'uint32_t';
712 elif sRef == 'cbFrame': # enter
713 return 'uint16_t';
714 elif sRef == 'cShift': ## @todo risky
715 return 'uint8_t';
716
717 self.raiseProblem('Unknown reference: %s' % (sRef,));
718 return None; # Shut up pylint 2.16.2.
719
720 def analyzeCallToType(self, sFnRef):
721 """
722 Determins the type of an indirect function call.
723 """
724 assert sFnRef[0] == 'p';
725
726 #
727 # Simple?
728 #
729 if sFnRef.find('-') < 0:
730 oDecoderFunction = self.oParent.oMcBlock.oFunction;
731
732 # Try the argument list of the function defintion macro invocation first.
733 iArg = 2;
734 while iArg < len(oDecoderFunction.asDefArgs):
735 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
736 return oDecoderFunction.asDefArgs[iArg - 1];
737 iArg += 1;
738
739 # Then check out line that includes the word and looks like a variable declaration.
740 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
741 for sLine in oDecoderFunction.asLines:
742 oMatch = oRe.match(sLine);
743 if oMatch:
744 if not oMatch.group(1).startswith('const'):
745 return oMatch.group(1);
746 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
747
748 #
749 # Deal with the pImpl->pfnXxx:
750 #
751 elif sFnRef.startswith('pImpl->pfn'):
752 sMember = sFnRef[len('pImpl->') : ];
753 sBaseType = self.analyzeCallToType('pImpl');
754 offBits = sMember.rfind('U') + 1;
755 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
756 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
757 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
758 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
759 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
760 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
761 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
762 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
763 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
764 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
765
766 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
767
768 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
769 return None; # Shut up pylint 2.16.2.
770
771 def analyze8BitGRegStmt(self, oStmt):
772 """
773 Gets the 8-bit general purpose register access details of the given statement.
774 ASSUMES the statement is one accessing an 8-bit GREG.
775 """
776 idxReg = 0;
777 if ( oStmt.sName.find('_FETCH_') > 0
778 or oStmt.sName.find('_REF_') > 0
779 or oStmt.sName.find('_TO_LOCAL') > 0):
780 idxReg = 1;
781
782 sRegRef = oStmt.asParams[idxReg];
783 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
784 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
785 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
786 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
787 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
788 else:
789 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
790
791 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
792 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
793 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
794 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
795 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
796 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
797 else:
798 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
799 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
800 sStdRef = 'bOther8Ex';
801
802 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
803 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
804 return (idxReg, sOrgExpr, sStdRef);
805
806
807 ## Maps memory related MCs to info for FLAT conversion.
808 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
809 ## segmentation checking for every memory access. Only applied to access
810 ## via ES, DS and SS. FS, GS and CS get the full segmentation treatment,
811 ## the latter (CS) is just to keep things simple (we could safely fetch via
812 ## it, but only in 64-bit mode could we safely write via it, IIRC).
813 kdMemMcToFlatInfo = {
814 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
815 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
816 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
817 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
818 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
819 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
820 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
821 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
822 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
823 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
824 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
825 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
826 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
827 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
828 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
829 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
830 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
831 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
832 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
833 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
834 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
835 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
836 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
837 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
838 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
839 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
840 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
841 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
842 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
843 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
844 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
845 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
846 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
847 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
848 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
849 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
850 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
851 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
852 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
853 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
854 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
855 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
856 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
857 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
858 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
859 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
860 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
861 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
862 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
863 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
864 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
865 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
866 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
867 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
868 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
869 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
870 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
871 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
872 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
873 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
874 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
875 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
876 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
877 'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
878 'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
879 'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
880 'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
881 'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
882 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
883 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
884 'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
885 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
886 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
887 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
888 'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
889 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
890 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
891 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
892 'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
893 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
894 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
895 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
896 'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
897 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
898 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
899 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
900 'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
901 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
902 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
903 'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
904 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
905 };
906
    ## Maps stack push/pop MCs to their FLAT variants.
    ## Each value is a pair: (name for flat 32-bit stack, name for flat 64-bit
    ## stack).  analyzeMorphStmtForThreaded indexes it with 1 when the variation
    ## is in kdVariationsWithFlat64StackAddress, 0 otherwise.  Note that some
    ## entries map back to the original MC for the combination that needs no
    ## special flat handling.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':          ( 'IEM_MC_FLAT32_PUSH_U16',         'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':          ( 'IEM_MC_FLAT32_PUSH_U32',         'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':          ( 'IEM_MC_PUSH_U64',                'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG':     ( 'IEM_MC_FLAT32_PUSH_U32_SREG',    'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_GREG_U16':      ( 'IEM_MC_FLAT32_POP_GREG_U16',     'IEM_MC_FLAT64_POP_GREG_U16', ),
        'IEM_MC_POP_GREG_U32':      ( 'IEM_MC_FLAT32_POP_GREG_U32',     'IEM_MC_POP_GREG_U32', ),
        'IEM_MC_POP_GREG_U64':      ( 'IEM_MC_POP_GREG_U64',            'IEM_MC_FLAT64_POP_GREG_U64', ),
    };
916
    ## Maps each variation to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_XXX MC that
    ## replaces IEM_MC_CALC_RM_EFF_ADDR in analyzeMorphStmtForThreaded, keyed
    ## by the effective address size of the variation.
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
937
    def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        The iParamRef parameter is the index into self.aoParamRefs where the
        traversal should resume (used for the recursive conditional-branch
        calls).

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that is purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so earlier offsets in the same parameter stay valid after replacement.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif (    oNewStmt.sName
                       in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
                           'IEM_MC_REL_JMP_S8_AND_FINISH',  'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
                           'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
                    # SET_RIP takes an absolute target, so no instruction length parameter.
                    if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                              'IEM_MC_SET_RIP_U64_AND_FINISH', ):
                        oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
                        and self.sVariation not in self.kdVariationsOnlyPre386):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
                    if self.sVariation in self.kdVariationsOnly64NoFlags:
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in self.kdVariationsOnly64WithFlags:
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                    # This is making the wrong branch of conditionals break out of the TB.
                    if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
                        sExitTbStatus = 'VINF_SUCCESS';
                        if self.sVariation in self.kdVariationsWithConditional:
                            if self.sVariation in self.kdVariationsWithConditionalNoJmp:
                                if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                    sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                            elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                        oNewStmt.asParams.append(sExitTbStatus);

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName     += '_THREADED';
                    oNewStmt.idxFn     += 1;   # cbInstr is inserted before the function, shifting indexes by one.
                    oNewStmt.idxParams += 1;
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatStackAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
                                                                                     self.kdVariationsWithFlat64StackAddress)];


                # Process branches of conditionals recursively.
                if isinstance(oStmt, iai.McStmtCond):
                    (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
                    if oStmt.aoElseBranch:
                        (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);

        return (aoThreadedStmts, iParamRef);
1087
1088
    def analyzeConsolidateThreadedParamRefs(self):
        """
        Consolidate threaded function parameter references into a dictionary
        with lists of the references to each variable/field.

        Assigns sNewName/iNewParam/offNewParam to every reference and computes
        self.cMinParams (number of 64-bit parameters needed after packing).
        Returns dummy True.
        """
        # Gather unique parameters.
        self.dParamRefs = {};
        for oRef in self.aoParamRefs:
            if oRef.sStdRef not in self.dParamRefs:
                self.dParamRefs[oRef.sStdRef] = [oRef,];
            else:
                self.dParamRefs[oRef.sStdRef].append(oRef);

        # Generate names for them for use in the threaded function.
        dParamNames = {};
        for sName, aoRefs in self.dParamRefs.items():
            # Morph the reference expression into a name.
            # NOTE(review): the '..._REG_8'/'..._RM_8' branches below are unreachable
            # because the shorter prefixes above them also match those strings; in
            # practice 8-bit GREG refs use the bModRm*8Ex std refs instead - confirm.
            if sName.startswith('IEM_GET_MODRM_REG'):           sName = 'bModRmRegP';
            elif sName.startswith('IEM_GET_MODRM_RM'):          sName = 'bModRmRmP';
            elif sName.startswith('IEM_GET_MODRM_REG_8'):       sName = 'bModRmReg8P';
            elif sName.startswith('IEM_GET_MODRM_RM_8'):        sName = 'bModRmRm8P';
            elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'):    sName = 'bEffVvvvP';
            elif sName.find('.') >= 0 or sName.find('->') >= 0:
                sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
            else:
                sName += 'P';

            # Ensure it's unique.
            if sName in dParamNames:
                for i in range(10):
                    if sName + str(i) not in dParamNames:
                        sName += str(i);
                        break;
            dParamNames[sName] = True;

            # Update all the references.
            for oRef in aoRefs:
                oRef.sNewName = sName;

        # Organize them by size too for the purpose of optimizing them.
        dBySize = {}        # type: Dict[int, List[str]]
        for sStdRef, aoRefs in self.dParamRefs.items():
            if aoRefs[0].sType[0] != 'P':
                cBits = g_kdTypeInfo[aoRefs[0].sType][0];
                assert(cBits <= 64);
            else:
                cBits = 64; # Pointers are passed as full 64-bit values.

            if cBits not in dBySize:
                dBySize[cBits] = [sStdRef,]
            else:
                dBySize[cBits].append(sStdRef);

        # Pack the parameters as best as we can, starting with the largest ones
        # and ASSUMING a 64-bit parameter size.
        self.cMinParams = 0;
        offNewParam     = 0;
        for cBits in sorted(dBySize.keys(), reverse = True):
            for sStdRef in dBySize[cBits]:
                if offNewParam == 0 or offNewParam + cBits > 64:
                    self.cMinParams += 1;
                    offNewParam      = cBits;
                else:
                    offNewParam     += cBits;
                assert(offNewParam <= 64);

                for oRef in self.dParamRefs[sStdRef]:
                    oRef.iNewParam   = self.cMinParams - 1;
                    oRef.offNewParam = offNewParam - cBits;

        # Currently there are a few that requires 4 parameters, list these so we can figure out why:
        if self.cMinParams >= 4:
            print('debug: cMinParams=%s cRawParams=%s - %s:%d'
                  % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));

        return True;
1165
1166 ksHexDigits = '0123456789abcdefABCDEF';
1167
    def analyzeFindThreadedParamRefs(self, aoStmts):    # pylint: disable=too-many-statements
        """
        Scans the statements for things that have to be passed on to the threaded
        function (populates self.aoParamRefs).

        Walks the statement tree (recursing into conditional branches) and
        records both implicit parameters (cbInstr, effective address bits) and
        decoder-related variables/fields found inside statement parameters.
        """
        for oStmt in aoStmts:
            # Some statements we can skip altogether.
            if isinstance(oStmt, iai.McCppPreProc):
                continue;
            if oStmt.isCppStmt() and oStmt.fDecode:
                continue;
            if oStmt.sName in ('IEM_MC_BEGIN',):
                continue;

            if isinstance(oStmt, iai.McStmtVar):
                if oStmt.sValue is None:
                    continue;
                # Only the initializer expression (param 2) of a local variable needs scanning.
                aiSkipParams = { 0: True, 1: True, 3: True };
            else:
                aiSkipParams = {};

            # Several statements have implicit parameters and some have different parameters.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                               'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0',          'IEM_MC_CALL_CIMPL_1',
                               'IEM_MC_CALL_CIMPL_2',           'IEM_MC_CALL_CIMPL_3',          'IEM_MC_CALL_CIMPL_4',
                               'IEM_MC_CALL_CIMPL_5',
                               'IEM_MC_DEFER_TO_CIMPL_0_RET',   'IEM_MC_DEFER_TO_CIMPL_1_RET',  'IEM_MC_DEFER_TO_CIMPL_2_RET',
                               'IEM_MC_DEFER_TO_CIMPL_3_RET',   'IEM_MC_DEFER_TO_CIMPL_4_RET',  'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
                self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));

            if (    oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
                and self.sVariation not in self.kdVariationsOnlyPre386):
                self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));

            if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                # This is being pretty presumptive about bRm always being the RM byte...
                assert len(oStmt.asParams) == 3;
                assert oStmt.asParams[1] == 'bRm';

                if self.sVariation in self.kdVariationsWithFlatAddr16:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
                                                             'uint16_t', oStmt, sStdRef = 'u16Disp'));
                elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                else:
                    assert self.sVariation in self.kdVariationsWithAddressOnly64;
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t', oStmt, sStdRef = 'bRmEx'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
                                                             'uint4_t', oStmt, sStdRef = 'cbInstr'));
                aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.

            # 8-bit register accesses needs to have their index argument reworked to take REX into account.
            if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
                (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.

            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
            if (    self.sVariation in self.kdVariationsWithFlatAddress
                and oStmt.sName in self.kdMemMcToFlatInfo
                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
            # function pointer or function table pointer for it to work.
            if isinstance(oStmt, iai.McStmtCall):
                if oStmt.sFn[0] == 'p':
                    self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
                elif (    oStmt.sFn[0] != 'i'
                      and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
                      and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
                    self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
                aiSkipParams[oStmt.idxFn] = True;

                # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    assert oStmt.idxFn == 2;
                    aiSkipParams[0] = True;


            # Check all the parameters for bogus references.
            for iParam, sParam in enumerate(oStmt.asParams):
                if iParam not in aiSkipParams  and  sParam not in self.oParent.dVariables:
                    # The parameter may contain a C expression, so we have to try
                    # extract the relevant bits, i.e. variables and fields while
                    # ignoring operators and parentheses.
                    offParam = 0;
                    while offParam < len(sParam):
                        # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
                        ch = sParam[offParam];
                        if ch.isalpha() or ch == '_':
                            offStart = offParam;
                            offParam += 1;
                            while offParam < len(sParam):
                                ch = sParam[offParam];
                                if not ch.isalnum() and ch != '_' and ch != '.':
                                    # NOTE(review): assumes '-' is never the last character of a
                                    # parameter (would index one past the end) - appears to hold
                                    # for decoder source, but confirm.
                                    if ch != '-' or sParam[offParam + 1] != '>':
                                        # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
                                        if (    ch == '('
                                            and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
                                            offParam += len('(pVM)->') - 1;
                                        else:
                                            break;
                                    offParam += 1;
                                offParam += 1;
                            sRef = sParam[offStart : offParam];

                            # For register references, we pass the full register indexes instead as macros
                            # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
                            # threaded function will be more efficient if we just pass the register index
                            # as a 4-bit param.
                            if (   sRef.startswith('IEM_GET_MODRM')
                                or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;
                                self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
                                                                         oStmt, iParam, offStart));

                            # We can skip known variables.
                            elif sRef in self.oParent.dVariables:
                                pass;

                            # Skip certain macro invocations.
                            elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
                                          'IEM_GET_GUEST_CPU_FEATURES',
                                          'IEM_IS_GUEST_CPU_AMD',
                                          'IEM_IS_16BIT_CODE',
                                          'IEM_IS_32BIT_CODE',
                                          'IEM_IS_64BIT_CODE',
                                          ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;

                                # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
                                if sRef not in ('IEM_IS_GUEST_CPU_AMD',
                                                'IEM_IS_16BIT_CODE',
                                                'IEM_IS_32BIT_CODE',
                                                'IEM_IS_64BIT_CODE',
                                                ):
                                    offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                    if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
                                        offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
                                        while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
                                            offParam += 1;

                            # Skip constants, globals, types (casts), sizeof and macros.
                            elif (   sRef.startswith('IEM_OP_PRF_')
                                  or sRef.startswith('IEM_ACCESS_')
                                  or sRef.startswith('IEMINT_')
                                  or sRef.startswith('X86_GREG_')
                                  or sRef.startswith('X86_SREG_')
                                  or sRef.startswith('X86_EFL_')
                                  or sRef.startswith('X86_FSW_')
                                  or sRef.startswith('X86_FCW_')
                                  or sRef.startswith('X86_XCPT_')
                                  or sRef.startswith('IEMMODE_')
                                  or sRef.startswith('IEM_F_')
                                  or sRef.startswith('IEM_CIMPL_F_')
                                  or sRef.startswith('g_')
                                  or sRef.startswith('iemAImpl_')
                                  or sRef.startswith('kIemNativeGstReg_')
                                  or sRef in ( 'int8_t',    'int16_t',    'int32_t',    'int64_t',
                                               'INT8_C',    'INT16_C',    'INT32_C',    'INT64_C',
                                               'UINT8_C',   'UINT16_C',   'UINT32_C',   'UINT64_C',
                                               'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
                                               'INT8_MAX',  'INT16_MAX',  'INT32_MAX',  'INT64_MAX',
                                               'INT8_MIN',  'INT16_MIN',  'INT32_MIN',  'INT64_MIN',
                                               'sizeof',    'NOREF',      'RT_NOREF',   'IEMMODE_64BIT',
                                               'RT_BIT_32', 'RT_BIT_64',  'true',       'false',
                                               'NIL_RTGCPTR',) ):
                                pass;

                            # Skip certain macro invocations.
                            # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
                            elif (   (    '.' not in sRef
                                      and '-' not in sRef
                                      and sRef not in ('pVCpu', ) )
                                  or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
                                self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
                                                                         oStmt, iParam, offStart));
                        # Number.
                        elif ch.isdigit():
                            if (    ch == '0'
                                and offParam + 2 <= len(sParam)
                                and sParam[offParam + 1] in 'xX'
                                and sParam[offParam + 2] in self.ksHexDigits ):
                                offParam += 2;
                                while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
                                    offParam += 1;
                            else:
                                while offParam < len(sParam) and sParam[offParam].isdigit():
                                    offParam += 1;
                        # Comment?
                        elif (    ch == '/'
                              and offParam + 4 <= len(sParam)
                              and sParam[offParam + 1] == '*'):
                            offParam += 2;
                            offNext = sParam.find('*/', offParam);
                            if offNext < offParam:
                                self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
                            offParam = offNext + 2;
                        # Whatever else.
                        else:
                            offParam += 1;

            # Traverse the branches of conditionals.
            if isinstance(oStmt, iai.McStmtCond):
                self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
                self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
        return True;
1397
1398 def analyzeVariation(self, aoStmts):
1399 """
1400 2nd part of the analysis, done on each variation.
1401
1402 The variations may differ in parameter requirements and will end up with
1403 slightly different MC sequences. Thus this is done on each individually.
1404
1405 Returns dummy True - raises exception on trouble.
1406 """
1407 # Now scan the code for variables and field references that needs to
1408 # be passed to the threaded function because they are related to the
1409 # instruction decoding.
1410 self.analyzeFindThreadedParamRefs(aoStmts);
1411 self.analyzeConsolidateThreadedParamRefs();
1412
1413 # Morph the statement stream for the block into what we'll be using in the threaded function.
1414 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1415 if iParamRef != len(self.aoParamRefs):
1416 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1417
1418 return True;
1419
    def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
        """
        Produces generic C++ statements that emit a call to the threaded function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm is the name of the variable with the threaded function
        to call.  This is for the case where all the variations have the same
        parameters and only the threaded function number differs.
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            # Assemble each 64-bit parameter by OR'ing together the references
            # packed into it (see analyzeConsolidateThreadedParamRefs).
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        #             emit this mode check from the compilation loop.  On the
        #             plus side, this means we eliminate unnecessary call at
        #             end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        #    aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        #                                 cchIndent = cchIndent));

        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags      = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        if (    asTbBranchedFlags
            and (   'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
                 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1495
1496
class ThreadedFunction(object):
    """
    A threaded function.

    Wraps one decoded MC block (iai.McBlock) together with the threaded
    function variations generated from it.  The derived state (aoVariations,
    dVariations, dVariables, dsCImplFlags) is only valid after analyze() has
    been called.
    """

    def __init__(self, oMcBlock: iai.McBlock) -> None:
        ## The MC block this threaded function is generated from.
        self.oMcBlock       = oMcBlock      # type: iai.McBlock
        # The remaining fields are only useful after analyze() has been called:
        ## Variations for this block. There is at least one.
        self.aoVariations   = []            # type: List[ThreadedFunctionVariation]
        ## Variation dictionary containing the same as aoVariations.
        self.dVariations    = {}            # type: Dict[str, ThreadedFunctionVariation]
        ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
        self.dVariables     = {}            # type: Dict[str, iai.McStmtVar]
        ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
        ## and those determined by analyzeCodeOperation().
        self.dsCImplFlags   = {}            # type: Dict[str, bool]

    @staticmethod
    def dummyInstance():
        """ Gets a dummy instance. """
        return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
                                            iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));

    def hasWithFlagsCheckingAndClearingVariation(self):
        """
        Check if there is one or more with flags checking and clearing
        variations for this threaded function.
        """
        for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            if sVarWithFlags in self.dVariations:
                return True;
        return False;

    #
    # Analysis and code morphing.
    #

    def raiseProblem(self, sMessage):
        """ Raises a problem (Exception) with source file and line prefixed. """
        raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));

    def warning(self, sMessage):
        """ Emits a warning to stdout with source file and line prefixed. """
        print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));

    def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
        """ Scans the statements for MC variables and call arguments. """
        for oStmt in aoStmts:
            if isinstance(oStmt, iai.McStmtVar):
                if oStmt.sVarName in self.dVariables:
                    raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
                # NOTE(review): dVariables is declared as Dict[str, iai.McStmtVar], but the
                #               value stored here is the variable *name*, not the statement
                #               object.  Only key membership is used in this file; confirm
                #               nothing else relies on the values before changing this.
                self.dVariables[oStmt.sVarName] = oStmt.sVarName;

            # There shouldn't be any variables or arguments declared inside if/
            # else blocks, but scan them too to be on the safe side.
            if isinstance(oStmt, iai.McStmtCond):
                #cBefore = len(self.dVariables);
                self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
                self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
                #if len(self.dVariables) != cBefore:
                #    raise Exception('Variables/arguments defined in conditional branches!');
        return True;

    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], fSeenConditional = False) -> bool:
        """
        Analyzes the code looking clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.

        This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.

        Returns annotation on return style (one of the g_ksFinishAnnotation_XXX
        values, or None when no finish/defer statement was seen).
        """
        sAnnotation = None;
        for oStmt in aoStmts:
            # Set IEM_CIMPL_F_BRANCH_XXX flags if we see any branching MCs.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (    oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                       or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
                       or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (    oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                       or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                       or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Check for return statements.  (Asserts that at most one finish
            # style is seen per statement list.)
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_Advance;
            elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',  'IEM_MC_REL_JMP_S16_AND_FINISH',
                                 'IEM_MC_REL_JMP_S32_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_RelJmp;
            elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                 'IEM_MC_SET_RIP_U64_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_SetJmp;
            elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_DeferToCImpl;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, True);
                if oStmt.aoElseBranch:
                    oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, True);

        return sAnnotation;

    def analyze(self):
        """
        Analyzes the code, identifying the number of parameters it requires and such.

        Populates aoVariations, dVariations, dVariables and dsCImplFlags.

        Returns dummy True - raises exception on trouble.
        """

        # Check the block for errors before we proceed (will decode it).
        asErrors = self.oMcBlock.check();
        if asErrors:
            raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
                                       for sError in asErrors]));

        # Decode the block into a list/tree of McStmt objects.
        aoStmts = self.oMcBlock.decode();

        # Scan the statements for local variables and call arguments (self.dVariables).
        self.analyzeFindVariablesAndCallArgs(aoStmts);

        # Scan the code for IEM_CIMPL_F_ and other clues.
        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
        self.analyzeCodeOperation(aoStmts);
        # The three CALLS flags are mutually exclusive (bool-to-int summing trick).
        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
            self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');

        # Create variations as needed.
        if iai.McStmt.findStmtByNames(aoStmts,
                                      { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
            asVariations = (ThreadedFunctionVariation.ksVariation_Default,);

        elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
                                                   'IEM_MC_FETCH_MEM_U8'     : True, # mov_AL_Ob ++
                                                   'IEM_MC_FETCH_MEM_U16'    : True, # mov_rAX_Ov ++
                                                   'IEM_MC_FETCH_MEM_U32'    : True,
                                                   'IEM_MC_FETCH_MEM_U64'    : True,
                                                   'IEM_MC_STORE_MEM_U8'     : True, # mov_Ob_AL ++
                                                   'IEM_MC_STORE_MEM_U16'    : True, # mov_Ov_rAX ++
                                                   'IEM_MC_STORE_MEM_U32'    : True,
                                                   'IEM_MC_STORE_MEM_U64'    : True, }):
            # Blocks with effective-address calculation / memory access get the
            # "WithAddress" variation sets, narrowed by the IEM_MC_F_XXX flags.
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
        else:
            # No memory access: the simpler "WithoutAddress" variation sets.
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;

        # Conditional relative branches get a _Jmp/_NoJmp pair per base variation.
        if (    'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
            and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
            assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
                   '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
            asVariationsBase = asVariations;
            asVariations     = [];
            for sVariation in asVariationsBase:
                asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
            assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);

        # Drop the eflags-checking-and-clearing variations when the block has no
        # finish statement (they only make sense for instruction-completing blocks).
        if not iai.McStmt.findStmtByNames(aoStmts,
                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U16_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U64_AND_FINISH': True,
                                            }):
            asVariations = [sVariation for sVariation in asVariations
                            if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];

        self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];

        # Dictionary variant of the list.
        self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };

        # Continue the analysis on each variation.
        for oVariation in self.aoVariations:
            oVariation.analyzeVariation(aoStmts);

        return True;

    ## Used by emitThreadedCallStmts.
    ## Variations where the effective address mode / segment prefix must be
    ## checked at runtime (the '8' and '16' bits of the switch value below).
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs: True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
        ThreadedFunctionVariation.ksVariation_32_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat: True,
        ThreadedFunctionVariation.ksVariation_32f_Flat: True,
        ThreadedFunctionVariation.ksVariation_16_Addr32: True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };

    def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        The sBranch parameter is used with conditional branches where we'll emit
        different threaded calls depending on whether we're in the jump-taken or
        no-jump code path.
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            assert not sBranch;
            return self.aoVariations[0].emitThreadedCallStmts(0);

        #
        # Case statement sub-class.
        #
        dByVari = self.dVariations;
        #fDbg   = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            """ One 'case' label in the generated switch, with an optional body
                (None body == fall-thru to the next case). """
            def __init__(self, sCond, sVarNm = None):
                self.sCond  = sCond;
                self.sVarNm = sVarNm;
                self.oVar   = dByVari[sVarNm] if sVarNm else None;
                self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;

            def toCode(self):
                """ Renders the case with its full body (generic switch). """
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                """ Renders the case as an enmFunction assignment (reduced switch). """
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                """ Checks if the bodies are identical except for the threaded
                    function index passed to IEM_MC2_EMIT_CALL_*. """
                if not self.aoBody:                 # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    if isinstance(oStmt, iai.McStmtCond):
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        # Params must match, except for the function index (param #1
                        # of IEM_MC2_EMIT_CALL_*) which is expected to differ.
                        if (    sParam != oThatStmt.asParams[iParam]
                            and (   iParam != 1
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != self.oVar.getIndexName()
                                 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        # Extra bits: 8 = address-size prefix, 16 = FS/GS/CS segment, 32 = eflags/brk check.
        #
        fSimple = True;
        sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
            # is not writable in 32-bit mode (at least), thus the penalty mode
            # for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple = False;                                            # threaded functions.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                          + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_64BIT',                ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16',           ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16',       None), # fall thru
                Case('IEMMODE_64BIT | 8',            ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32',           ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16',      ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16',  None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8',       ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
        elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_64BIT',
                                ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
            if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32',
                                    ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));

        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',           ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16',      None), # fall thru
                Case('IEMMODE_32BIT | 16',                                            None), # fall thru
                Case('IEMMODE_32BIT',                                                 ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8',       None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8',                                             ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',           ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16',      None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16',                                            None), # fall thru
                    Case('IEMMODE_32BIT | 32',                                                 ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8',       None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8',                                             ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',                                       ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',                                       ThrdFnVar.ksVariation_32f),
                ]);
        elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',
                     ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
            ]);
            if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',
                         ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
                ]);

        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16',      None), # fall thru
                Case('IEMMODE_16BIT',           ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16',  None), # fall thru
                Case('IEMMODE_16BIT | 8',       ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16',      None), # fall thru
                    Case('IEMMODE_16BIT | 32',           ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16',  None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8',       ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
        elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT',
                                ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
            if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32',
                                    ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));


        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari:  # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
                                ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
        if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
                                ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        # (Assumes at least one case has a body; would raise IndexError otherwise.)
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # Reduced form: switch only assigns enmFunction, one shared body follows.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;

    def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for iStmt, oStmt in enumerate(aoStmts):
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                # Patch the analyzed IEM_CIMPL_F_XXX flags into the IEM_MC_BEGIN invocation.
                oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or preceed.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        # Insert the call(s) just before this statement.
                        aoDecoderStmts.pop();
                        if not fIsConditional:
                            aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
                        else:
                            assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                    elif iai.g_dMcStmtParsers[oStmt.sName][2]:
                        # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
                        if not sBranchAnnotation:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        assert fIsConditional;
                        aoDecoderStmts.pop();
                        if sBranchAnnotation == g_ksFinishAnnotation_Advance:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
                        elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
                                                              { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
                        else:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                elif (    not fIsConditional
                      and oStmt.fDecode
                      and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                           or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                    # Emit right after the decoding-done helper invocation.
                    aoDecoderStmts.extend(self.emitThreadedCallStmts());
                    fCallEmitted = True;

            # Process branches of conditionals recursively.
            # Note: fCallEmitted only turns true here when *both* branches emitted
            #       the call, otherwise a later statement must still emit it.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
                                                                           fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
                                                                                 fCallEmitted, cDepth + 1,
                                                                                 oStmt.oElseBranchAnnotation);
                else:
                    fCallEmitted2 = False;
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);


    def generateInputCode(self):
        """
        Modifies the input code.

        Returns the morphed block as a string of C source code.
        """
        cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;

        if len(self.oMcBlock.aoStmts) == 1:
            # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
            sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
            if self.dsCImplFlags:
                sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
            else:
                sCode += '0;\n';
            sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
                                                  cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
            # NOTE(review): min(cchIndent, 2) - 2 is always <= 0, so sIndent is
            #               always the empty string here - looks like it might
            #               have been intended differently; confirm.
            sIndent = ' ' * (min(cchIndent, 2) - 2);
            sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
            return sCode;

        # IEM_MC_BEGIN/END block
        assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
        fIsConditional = (    'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
                          and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
        return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
                                            cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2114
# Short alias for ThreadedFunctionVariation, used extensively by
# ThreadedFunction.emitThreadedCallStmts when building the case tables.
ThrdFnVar = ThreadedFunctionVariation;
2117
2118
2119class IEMThreadedGenerator(object):
2120 """
2121 The threaded code generator & annotator.
2122 """
2123
2124 def __init__(self):
2125 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2126 self.oOptions = None # type: argparse.Namespace
2127 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2128 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
2129
2130 #
2131 # Processing.
2132 #
2133
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the instruction files, creates a ThreadedFunction for each MC
        block, analyzes them, and sanity checks argument/variable usage.
        Prints debug statistics to stderr.  Returns success indicator (always
        True; hard failures raise).
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        # dRawParamCounts / dMinParamCounts are histograms: parameter count -> number of variations.
        dRawParamCounts = {};
        dMinParamCounts = {};
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyze();
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams]      = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        # The dict comprehension is just a cheap way of getting the union of the two key sets.
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        iThreadedFunction       = 0;
        oThreadedFunction       = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            # Skip past all functions belonging to this source file (the dummy
            # returned for out-of-range indexes terminates the scan).
            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.displayStatistics(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs         = 0;
        cMaxVars         = 0;
        cMaxVarsAndArgs  = 0;
        cbMaxArgs        = 0;
        cbMaxVars        = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.cLocals >= 0:   # negative cLocals presumably means 'not collected' - TODO confirm.
                # Counts.
                assert oThreadedFunction.oMcBlock.cArgs >= 0;
                cMaxVars        = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
                cMaxArgs        = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
                cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
                # Calc stack allocation size:
                # Each arg/var is rounded up to a whole number of 64-bit (8 byte) slots.
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars        = max(cbMaxVars, cbVars);
                cbMaxArgs        = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        return True;
2211
2212 #
2213 # Output
2214 #
2215
2216 def generateLicenseHeader(self):
2217 """
2218 Returns the lines for a license header.
2219 """
2220 return [
2221 '/*',
2222 ' * Autogenerated by $Id: IEMAllThrdPython.py 102977 2024-01-19 23:11:30Z vboxsync $ ',
2223 ' * Do not edit!',
2224 ' */',
2225 '',
2226 '/*',
2227 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2228 ' *',
2229 ' * This file is part of VirtualBox base platform packages, as',
2230 ' * available from https://www.virtualbox.org.',
2231 ' *',
2232 ' * This program is free software; you can redistribute it and/or',
2233 ' * modify it under the terms of the GNU General Public License',
2234 ' * as published by the Free Software Foundation, in version 3 of the',
2235 ' * License.',
2236 ' *',
2237 ' * This program is distributed in the hope that it will be useful, but',
2238 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2239 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2240 ' * General Public License for more details.',
2241 ' *',
2242 ' * You should have received a copy of the GNU General Public License',
2243 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2244 ' *',
2245 ' * The contents of this file may alternatively be used under the terms',
2246 ' * of the Common Development and Distribution License Version 1.0',
2247 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2248 ' * in the VirtualBox distribution, in which case the provisions of the',
2249 ' * CDDL are applicable instead of those of the GPL.',
2250 ' *',
2251 ' * You may elect to license modified versions of this file under the',
2252 ' * terms and conditions of either the GPL or the CDDL or both.',
2253 ' *',
2254 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2255 ' */',
2256 '',
2257 '',
2258 '',
2259 ];
2260
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry is a tuple: (sFuncNm, cArgs, fHaveRecompFunc) - i.e. the
    ## native recompiler indicator is entry index 2, not index 1.
    katBltIns = (
        ( 'Nop', 0, True ),
        ( 'LogCpuState', 0, True ),

        ( 'DeferToCImpl0', 2, True ),
        ( 'CheckIrq', 0, True ),
        ( 'CheckMode', 1, True ),
        ( 'CheckHwInstrBps', 0, False ),
        ( 'CheckCsLim', 1, True ),

        ( 'CheckCsLimAndOpcodes', 3, True ),
        ( 'CheckOpcodes', 3, True ),
        ( 'CheckOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
    );
2297
    def generateThreadedFunctionsHeader(self, oOut):
        """
        Generates the threaded functions header file.
        Returns success indicator.

        Emits the IEMTHREADEDFUNCS enum (built-ins first, then all function
        variations in emit order) and externs for the parallel tables.  As a
        side effect, assigns each variation's iEnumValue, which the source
        generators later assert against.
        """

        asLines = self.generateLicenseHeader();

        # Generate the threaded function table indexes.
        asLines += [
            'typedef enum IEMTHREADEDFUNCS',
            '{',
            ' kIemThreadedFunc_Invalid = 0,',
            '',
            ' /*',
            ' * Predefined',
            ' */',
        ];
        asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];

        # Enum values start right after the built-ins; must match the table generators.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            asLines += [
                '',
                ' /*',
                ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
                ' */',
            ];
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    oVariation.iEnumValue = iThreadedFunction;  # Consumed/asserted by the source generators.
                    asLines.append(' ' + oVariation.getIndexName() + ',');
        asLines += [
            ' kIemThreadedFunc_End',
            '} IEMTHREADEDFUNCS;',
            '',
        ];

        # Prototype the function table.
        asLines += [
            'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
            '#if defined(IN_RING3) || defined(LOG_ENABLED)',
            'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
            '#endif',
            'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
        ];

        oOut.write('\n'.join(asLines));
        return True;
2349
    ## Maps a parameter bit width to the C mask constant used when unpacking
    ## it from a packed 64-bit threaded-call parameter.
    ksBitsToIntMask = {
        1:  "UINT64_C(0x1)",
        2:  "UINT64_C(0x3)",
        4:  "UINT64_C(0xf)",
        8:  "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2358
    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
        """
        Outputs code for unpacking parameters.
        This is shared by the threaded and native code generators.

        asParams names the C expressions for the packed 64-bit parameters
        (e.g. 'uParam0' or 'pCallEntry->auParams[0]').  Emits one aligned
        local variable declaration per distinct parameter reference.
        """
        aasVars = [];
        for aoRefs in oVariation.dParamRefs.values():
            oRef = aoRefs[0];
            if oRef.sType[0] != 'P':
                # g_kdTypeInfo: index 0 presumably the bit count, index 2 the C type - TODO confirm against its definition.
                cBits = g_kdTypeInfo[oRef.sType][0];
                sType = g_kdTypeInfo[oRef.sType][2];
            else:
                # Pointer types occupy a full 64-bit parameter.
                cBits = 64;
                sType = oRef.sType;

            sTypeDecl = sType + ' const';

            if cBits == 64:
                assert oRef.offNewParam == 0;
                if sType == 'uint64_t':
                    sUnpack = '%s;' % (asParams[oRef.iNewParam],);
                else:
                    sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
            elif oRef.offNewParam == 0:
                sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
            else:
                # Sub-field packed at a non-zero bit offset: shift down, then mask.
                sUnpack = '(%s)((%s >> %s) & %s);' \
                        % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);

            sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);

            # Column 0 is a 'param:offset' sort key so declarations come out in packing order.
            aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
                             sTypeDecl, oRef.sNewName, sUnpack, sComment ]);

        # Compute per-column widths so the output lines up.
        acchVars = [0, 0, 0, 0, 0];
        for asVar in aasVars:
            for iCol, sStr in enumerate(asVar):
                acchVars[iCol] = max(acchVars[iCol], len(sStr));
        sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
        for asVar in sorted(aasVars):
            oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
        return True;
2400
    ## C parameter names of the packed 64-bit arguments every threaded function receives.
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut):
        """
        Generates the threaded functions source file.
        Returns success indicator.

        Emits one static function per variation, then the three parallel
        tables (function pointers, names, used argument counts) indexed by
        IEMTHREADEDFUNCS.  Assumes generateThreadedFunctionsHeader() already
        assigned iEnumValue to each variation (asserted below).
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big banner comment separating each variation group.
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                                 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                    os.path.split(oMcBlock.sSrcFile)[1],
                                    ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' /*Invalid*/ NULL,',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' "Invalid",',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            ' 0, /*Invalid*/',
        ];
        aasTables = (asFuncTable, asNameTable, asArgCntTab,);

        # Built-in entries come first in all three tables.
        for asTable in aasTables:
            asTable.extend((
                '',
                ' /*',
                ' * Predefined.',
                ' */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
            asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));

        # Then all variations, asserting the enum values match the header generator.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    ' /*',
                    ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    ' */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        oOut.write( '\n'
                  + '\n');
        oOut.write('\n'.join(asFuncTable));
        # The name table is only compiled in when names are needed.
        oOut.write( '\n'
                  + '\n'
                  + '\n'
                  + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                  + '\n'
                  + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write('\n');

        return True;
2536
2537 def generateNativeFunctionsHeader(self, oOut):
2538 """
2539 Generates the native recompiler functions header file.
2540 Returns success indicator.
2541 """
2542 if not self.oOptions.fNativeRecompilerEnabled:
2543 return True;
2544
2545 asLines = self.generateLicenseHeader();
2546
2547 # Prototype the function table.
2548 asLines += [
2549 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2550 '',
2551 ];
2552
2553 # Emit indicators as to which of the builtin functions have a native
2554 # recompiler function and which not. (We only really need this for
2555 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
2556 for atBltIn in self.katBltIns:
2557 if atBltIn[1]:
2558 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
2559 else:
2560 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
2561
2562 oOut.write('\n'.join(asLines));
2563 return True;
2564
    def generateNativeFunctionsSource(self, oOut):
        """
        Generates the native recompiler functions source file.
        Returns success indicator.

        Does nothing (but still succeeds) unless --native was given.  Emits a
        native recompile function for every recompilable variation, then the
        g_apfnIemNativeRecompileFunctions table running parallel to the
        threaded function tables (NULL for non-recompilable entries).
        """
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big banner comment separating each variation group.
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                # Only variations the analysis deemed recompilable get a function body.
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                                 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                    os.path.split(oMcBlock.sSrcFile)[1],
                                    ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.  Same packing as the threaded functions,
                    # but read from the call entry instead of uParamN arguments.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write( '\n'
                  + '\n'
                  + '/*\n'
                  + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                  + ' */\n'
                  + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                  + '{\n'
                  + ' /*Invalid*/ NULL,'
                  + '\n'
                  + ' /*\n'
                  + ' * Predefined.\n'
                  + ' */\n'
                  );
        # katBltIns entry index 2 is the native recompiler indicator.
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # Then the variations, asserting enum values assigned by the header generator.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write( ' /*\n'
                      + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                      + ' */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getNativeFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write( '};\n'
                  + '\n');
        return True;
2660
2661
2662 def getThreadedFunctionByIndex(self, idx):
2663 """
2664 Returns a ThreadedFunction object for the given index. If the index is
2665 out of bounds, a dummy is returned.
2666 """
2667 if idx < len(self.aoThreadedFuncs):
2668 return self.aoThreadedFuncs[idx];
2669 return ThreadedFunction.dummyInstance();
2670
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.
        Returns success indicator.

        Copies the input sources belonging to file set idxFile through to
        oOut, replacing each IEM_MC block with the annotated code produced by
        ThreadedFunction.generateInputCode().  Relies on aidxFirstFunctions
        (see processInputFiles) to find each file's first threaded function.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?  (Set -1 means 'in every set'.)
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude     = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                assert fInclude is False;   # every input file must be in the info table.
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine             = 0;
            while iLine < len(oParser.asLines):
                sLine  = oParser.asLines[iLine];
                iLine += 1; # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block. Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    # Sanity: the begin line holds exactly one IEM_MC invocation
                    # (IEM_MC_F_ doesn't count) unless it is a partial macro expansion.
                    assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the end line and keep whatever follows the block on it.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    # Walk the line segment by segment, splicing in each block replacement.
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        # Sanity check the known shapes of generated replacement code.
                        assert ( sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
2761
2762 def generateModifiedInput1(self, oOut):
2763 """
2764 Generates the combined modified input source/header file, part 1.
2765 Returns success indicator.
2766 """
2767 return self.generateModifiedInput(oOut, 1);
2768
2769 def generateModifiedInput2(self, oOut):
2770 """
2771 Generates the combined modified input source/header file, part 2.
2772 Returns success indicator.
2773 """
2774 return self.generateModifiedInput(oOut, 2);
2775
2776 def generateModifiedInput3(self, oOut):
2777 """
2778 Generates the combined modified input source/header file, part 3.
2779 Returns success indicator.
2780 """
2781 return self.generateModifiedInput(oOut, 3);
2782
2783 def generateModifiedInput4(self, oOut):
2784 """
2785 Generates the combined modified input source/header file, part 4.
2786 Returns success indicator.
2787 """
2788 return self.generateModifiedInput(oOut, 4);
2789
2790
2791 #
2792 # Main
2793 #
2794
    def main(self, asArgs):
        """
        C-like main function.
        Returns exit code.

        Parses the command line, processes the input files and writes each
        requested output file ('-' selects stdout).  Returns 0 on success,
        1 on any failure.
        """

        #
        # Parse arguments
        #
        sScriptDir = os.path.dirname(__file__);
        oParser    = argparse.ArgumentParser(add_help = False);
        oParser.add_argument('asInFiles',
                             metavar = 'input.cpp.h',
                             nargs = '*',
                             default = [os.path.join(sScriptDir, aoInfo[0])
                                        for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
                             help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
        oParser.add_argument('--host-arch',
                             metavar = 'arch',
                             dest = 'sHostArch',
                             action = 'store',
                             default = None,
                             help = 'The host architecture.');

        oParser.add_argument('--out-thrd-funcs-hdr',
                             metavar = 'file-thrd-funcs.h',
                             dest = 'sOutFileThrdFuncsHdr',
                             action = 'store',
                             default = '-',
                             help = 'The output header file for the threaded functions.');
        oParser.add_argument('--out-thrd-funcs-cpp',
                             metavar = 'file-thrd-funcs.cpp',
                             dest = 'sOutFileThrdFuncsCpp',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the threaded functions.');
        oParser.add_argument('--out-n8ve-funcs-hdr',
                             metavar = 'file-n8tv-funcs.h',
                             dest = 'sOutFileN8veFuncsHdr',
                             action = 'store',
                             default = '-',
                             help = 'The output header file for the native recompiler functions.');
        oParser.add_argument('--out-n8ve-funcs-cpp',
                             metavar = 'file-n8tv-funcs.cpp',
                             dest = 'sOutFileN8veFuncsCpp',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions.');
        oParser.add_argument('--native',
                             dest = 'fNativeRecompilerEnabled',
                             action = 'store_true',
                             default = False,
                             help = 'Enables generating the files related to native recompilation.');
        oParser.add_argument('--out-mod-input1',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput1',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 1.');
        oParser.add_argument('--out-mod-input2',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput2',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 2.');
        oParser.add_argument('--out-mod-input3',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput3',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 3.');
        oParser.add_argument('--out-mod-input4',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput4',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 4.');
        oParser.add_argument('--help', '-h', '-?',
                             action = 'help',
                             help = 'Display help and exit.');
        oParser.add_argument('--version', '-V',
                             action = 'version',
                             version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
                                     % (__version__.split()[1], iai.__version__.split()[1],),
                             help = 'Displays the version/revision of the script and exit.');
        self.oOptions = oParser.parse_args(asArgs[1:]);
        print("oOptions=%s" % (self.oOptions,), file = sys.stderr);

        #
        # Process the instructions specified in the IEM sources.
        #
        if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
            #
            # Generate the output files.
            # Each entry pairs an output file name ('-' = stdout) with its generator method.
            #
            aaoOutputFiles = (
                 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
                 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
                 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
                 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
                 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
                 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
                 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
                 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
            );
            # Keep generating the remaining files even if one generator fails,
            # only the final exit code reflects the accumulated result.
            fRc = True;
            for sOutFile, fnGenMethod in aaoOutputFiles:
                if sOutFile == '-':
                    fRc = fnGenMethod(sys.stdout) and fRc;
                else:
                    try:
                        oOut = open(sOutFile, 'w');                 # pylint: disable=consider-using-with,unspecified-encoding
                    except Exception as oXcpt:
                        print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
                        return 1;
                    fRc = fnGenMethod(oOut) and fRc;
                    oOut.close();
            if fRc:
                return 0;

        return 1;
2917
if __name__ == '__main__':
    # Script entry point: the process exit code comes from IEMThreadedGenerator.main().
    sys.exit(IEMThreadedGenerator().main(sys.argv));
2920
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette