/* $Id: IEMN8veRecompiler.h 104407 2024-04-23 23:16:04Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Native Recompiler Internals.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h
#define VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif


/** @defgroup grp_iem_n8ve_re Native Recompiler Internals.
 * @ingroup grp_iem_int
 * @{
 */

#include <iprt/assertcompile.h> /* for RT_IN_ASSEMBLER mode */

/** @def IEMNATIVE_WITH_TB_DEBUG_INFO
 * Enables generating internal debug info for better TB disassembly dumping. */
#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
# define IEMNATIVE_WITH_TB_DEBUG_INFO
#endif

/** @def IEMNATIVE_WITH_LIVENESS_ANALYSIS
 * Enables liveness analysis. */
#if 1 || defined(DOXYGEN_RUNNING)
# define IEMNATIVE_WITH_LIVENESS_ANALYSIS
/*# define IEMLIVENESS_EXTENDED_LAYOUT*/
#endif

/** @def IEMNATIVE_WITH_EFLAGS_SKIPPING
 * Enables skipping EFLAGS calculations/updating based on liveness info. */
#if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) || defined(DOXYGEN_RUNNING)
# define IEMNATIVE_WITH_EFLAGS_SKIPPING
#endif


/** @def IEMNATIVE_STRICT_EFLAGS_SKIPPING
 * Enables strict consistency checks around EFLAGS skipping.
 * @note Only defined when IEMNATIVE_WITH_EFLAGS_SKIPPING is also defined. */
#ifdef IEMNATIVE_WITH_EFLAGS_SKIPPING
# ifdef VBOX_STRICT
#  define IEMNATIVE_STRICT_EFLAGS_SKIPPING
# endif
#elif defined(DOXYGEN_RUNNING)
# define IEMNATIVE_STRICT_EFLAGS_SKIPPING
#endif

#ifdef VBOX_WITH_STATISTICS
/** Always count instructions for now. */
# define IEMNATIVE_WITH_INSTRUCTION_COUNTING
#endif

/** @def IEMNATIVE_WITH_RECOMPILER_PROLOGUE_SINGLETON
 * Enables having only a single prologue for native TBs. */
#if 1 || defined(DOXYGEN_RUNNING)
# define IEMNATIVE_WITH_RECOMPILER_PROLOGUE_SINGLETON
#endif


/** @name Stack Frame Layout
 *
 * @{ */
/** The size of the area for stack variables and spills and stuff.
 * @note This limit is duplicated in the python script(s).  We add 0x40 for
 *       alignment padding. */
#define IEMNATIVE_FRAME_VAR_SIZE            (0xc0 + 0x40)
/** Number of 64-bit variable slots (0x100 / 8 = 32). */
#define IEMNATIVE_FRAME_VAR_SLOTS           (IEMNATIVE_FRAME_VAR_SIZE / 8)
AssertCompile(IEMNATIVE_FRAME_VAR_SLOTS == 32);

#ifdef RT_ARCH_AMD64
/** A stack alignment adjustment (between non-volatile register pushes and
 * the stack variable area, so the latter is better aligned). */
# define IEMNATIVE_FRAME_ALIGN_SIZE         8

/** Number of stack argument slots for calls made from the frame. */
# ifdef RT_OS_WINDOWS
#  define IEMNATIVE_FRAME_STACK_ARG_COUNT   4
# else
#  define IEMNATIVE_FRAME_STACK_ARG_COUNT   2
# endif
/** Number of any shadow arguments (spill area) for calls we make. */
# ifdef RT_OS_WINDOWS
#  define IEMNATIVE_FRAME_SHADOW_ARG_COUNT  4
# else
#  define IEMNATIVE_FRAME_SHADOW_ARG_COUNT  0
# endif

/** Frame pointer (RBP) relative offset of the last push. */
# ifdef RT_OS_WINDOWS
#  define IEMNATIVE_FP_OFF_LAST_PUSH        (7 * -8)
# else
#  define IEMNATIVE_FP_OFF_LAST_PUSH        (5 * -8)
# endif
/** Frame pointer (RBP) relative offset of the stack variable area (the lowest
 * address for it). */
# define IEMNATIVE_FP_OFF_STACK_VARS        (IEMNATIVE_FP_OFF_LAST_PUSH - IEMNATIVE_FRAME_ALIGN_SIZE - IEMNATIVE_FRAME_VAR_SIZE)
/** Frame pointer (RBP) relative offset of the first stack argument for calls. */
# define IEMNATIVE_FP_OFF_STACK_ARG0        (IEMNATIVE_FP_OFF_STACK_VARS - IEMNATIVE_FRAME_STACK_ARG_COUNT * 8)
/** Frame pointer (RBP) relative offset of the second stack argument for calls. */
# define IEMNATIVE_FP_OFF_STACK_ARG1        (IEMNATIVE_FP_OFF_STACK_ARG0 + 8)
# ifdef RT_OS_WINDOWS
/** Frame pointer (RBP) relative offset of the third stack argument for calls. */
#  define IEMNATIVE_FP_OFF_STACK_ARG2       (IEMNATIVE_FP_OFF_STACK_ARG0 + 16)
/** Frame pointer (RBP) relative offset of the fourth stack argument for calls. */
#  define IEMNATIVE_FP_OFF_STACK_ARG3       (IEMNATIVE_FP_OFF_STACK_ARG0 + 24)
# endif

# ifdef RT_OS_WINDOWS
/** Frame pointer (RBP) relative offset of the first incoming shadow argument. */
#  define IEMNATIVE_FP_OFF_IN_SHADOW_ARG0   (16)
/** Frame pointer (RBP) relative offset of the second incoming shadow argument. */
#  define IEMNATIVE_FP_OFF_IN_SHADOW_ARG1   (24)
/** Frame pointer (RBP) relative offset of the third incoming shadow argument. */
#  define IEMNATIVE_FP_OFF_IN_SHADOW_ARG2   (32)
/** Frame pointer (RBP) relative offset of the fourth incoming shadow argument. */
#  define IEMNATIVE_FP_OFF_IN_SHADOW_ARG3   (40)
# endif

#elif defined(RT_ARCH_ARM64)
/** No alignment padding needed for arm64. */
# define IEMNATIVE_FRAME_ALIGN_SIZE         0
/** No stack argument slots; the 8 argument registers will suffice. */
# define IEMNATIVE_FRAME_STACK_ARG_COUNT    0
/** There is no argument spill area. */
# define IEMNATIVE_FRAME_SHADOW_ARG_COUNT   0

/** Number of saved registers at the top of our stack frame.
 * This includes the return address and old frame pointer, so x19 through x30. */
# define IEMNATIVE_FRAME_SAVE_REG_COUNT     (12)
/** The size of the saved register area (IEMNATIVE_FRAME_SAVE_REG_COUNT * 8). */
# define IEMNATIVE_FRAME_SAVE_REG_SIZE      (IEMNATIVE_FRAME_SAVE_REG_COUNT * 8)

/** Frame pointer (BP) relative offset of the last push. */
# define IEMNATIVE_FP_OFF_LAST_PUSH         (10 * -8)

/** Frame pointer (BP) relative offset of the stack variable area (the lowest
 * address for it). */
# define IEMNATIVE_FP_OFF_STACK_VARS        (IEMNATIVE_FP_OFF_LAST_PUSH - IEMNATIVE_FRAME_ALIGN_SIZE - IEMNATIVE_FRAME_VAR_SIZE)

#else
# error "port me"
#endif
/** @} */
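
/*
 * Example: An illustrative sketch (hypothetical helper, not part of the
 * recompiler API) showing how the constants above combine.  The byte offset
 * of 64-bit variable slot idxSlot relative to the frame pointer is simply the
 * start of the variable area plus the slot index scaled by 8:
 *
 *      static int32_t iemNativeExampleVarSlotOffFromFp(uint8_t idxSlot)
 *      {
 *          Assert(idxSlot < IEMNATIVE_FRAME_VAR_SLOTS);
 *          return IEMNATIVE_FP_OFF_STACK_VARS + (int32_t)idxSlot * 8;
 *      }
 */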


/** @name Fixed Register Allocation(s)
 * @{ */
/** @def IEMNATIVE_REG_FIXED_PVMCPU
 * The number of the register holding the pVCpu pointer. */
/** @def IEMNATIVE_REG_FIXED_PCPUMCTX
 * The number of the register holding the &pVCpu->cpum.GstCtx pointer.
 * @note This is not available on AMD64, only ARM64. */
/** @def IEMNATIVE_REG_FIXED_TMP0
 * Dedicated temporary register.
 * @todo replace this by a register allocator and content tracker. */
/** @def IEMNATIVE_REG_FIXED_MASK
 * Mask of GPRs with fixed assignments, either by us or dictated by the CPU/OS
 * architecture. */
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/** @def IEMNATIVE_SIMD_REG_FIXED_MASK
 * Mask of SIMD registers with fixed assignments, either by us or dictated by the CPU/OS
 * architecture. */
/** @def IEMNATIVE_SIMD_REG_FIXED_TMP0
 * Dedicated temporary SIMD register. */
#endif
#if defined(RT_ARCH_ARM64) || defined(DOXYGEN_RUNNING) /* arm64 goes first because of doxygen */
# define IEMNATIVE_REG_FIXED_PVMCPU         ARMV8_A64_REG_X28
# define IEMNATIVE_REG_FIXED_PVMCPU_ASM     RT_CONCAT(x,IEMNATIVE_REG_FIXED_PVMCPU)
# define IEMNATIVE_REG_FIXED_PCPUMCTX       ARMV8_A64_REG_X27
# define IEMNATIVE_REG_FIXED_PCPUMCTX_ASM   RT_CONCAT(x,IEMNATIVE_REG_FIXED_PCPUMCTX)
# define IEMNATIVE_REG_FIXED_TMP0           ARMV8_A64_REG_X15
# if defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) && 0 /* debug the updating with a shadow RIP. */
#  define IEMNATIVE_REG_FIXED_TMP1          ARMV8_A64_REG_X16
#  define IEMNATIVE_REG_FIXED_PC_DBG        ARMV8_A64_REG_X26
#  define IEMNATIVE_REG_FIXED_MASK_ADD      (  RT_BIT_32(IEMNATIVE_REG_FIXED_TMP1) \
                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_PC_DBG))
# else
#  define IEMNATIVE_REG_FIXED_MASK_ADD      0
# endif
# define IEMNATIVE_REG_FIXED_MASK           (  RT_BIT_32(ARMV8_A64_REG_SP) \
                                             | RT_BIT_32(ARMV8_A64_REG_LR) \
                                             | RT_BIT_32(ARMV8_A64_REG_BP) \
                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU) \
                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_PCPUMCTX) \
                                             | RT_BIT_32(ARMV8_A64_REG_X18) \
                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0) \
                                             | IEMNATIVE_REG_FIXED_MASK_ADD)

# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
#  define IEMNATIVE_SIMD_REG_FIXED_TMP0     ARMV8_A64_REG_Q30
#  if defined(IEMNATIVE_WITH_SIMD_REG_ACCESS_ALL_REGISTERS)
#   define IEMNATIVE_SIMD_REG_FIXED_MASK    RT_BIT_32(ARMV8_A64_REG_Q30)
#  else
/** @note
 * ARM64 has 32 registers, but they are only 128-bit wide.  So, in order to
 * support emulating 256-bit registers we pair two real registers statically to
 * one virtual for now, leaving us with only 16 256-bit registers.  We always
 * pair v0 with v1, v2 with v3, etc. so we mark the higher register as fixed and
 * the register allocator assumes that it will always be free when the lower is
 * picked.
 *
 * Also ARM64 declares the low 64-bit of v8-v15 as callee saved, so we don't
 * touch them in order to avoid having to save and restore them in the
 * prologue/epilogue.
 */
#   define IEMNATIVE_SIMD_REG_FIXED_MASK    (  UINT32_C(0xff00) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q31) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q30) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q29) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q27) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q25) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q23) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q21) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q19) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q17) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q15) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q13) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q11) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q9) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q7) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q5) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q3) \
                                             | RT_BIT_32(ARMV8_A64_REG_Q1))
#  endif
# endif

#elif defined(RT_ARCH_AMD64)
# define IEMNATIVE_REG_FIXED_PVMCPU         X86_GREG_xBX
# define IEMNATIVE_REG_FIXED_PVMCPU_ASM     xBX
# define IEMNATIVE_REG_FIXED_TMP0           X86_GREG_x11
# define IEMNATIVE_REG_FIXED_MASK           (  RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU) \
                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0) \
                                             | RT_BIT_32(X86_GREG_xSP) \
                                             | RT_BIT_32(X86_GREG_xBP) )

# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
#  define IEMNATIVE_SIMD_REG_FIXED_TMP0     5 /* xmm5/ymm5 */
#  ifndef IEMNATIVE_WITH_SIMD_REG_ACCESS_ALL_REGISTERS
#   ifndef _MSC_VER
#    define IEMNATIVE_WITH_SIMD_REG_ACCESS_ALL_REGISTERS
#   endif
#  endif
#  ifdef IEMNATIVE_WITH_SIMD_REG_ACCESS_ALL_REGISTERS
#   define IEMNATIVE_SIMD_REG_FIXED_MASK    (RT_BIT_32(IEMNATIVE_SIMD_REG_FIXED_TMP0))
#  else
/** @note On Windows/AMD64 xmm6 through xmm15 are marked as callee saved. */
#   define IEMNATIVE_SIMD_REG_FIXED_MASK    (  UINT32_C(0xffc0) \
                                             | RT_BIT_32(IEMNATIVE_SIMD_REG_FIXED_TMP0))
#  endif
# endif

#else
# error "port me"
#endif
/** @} */
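
/*
 * Example: A small illustrative snippet (not taken from the actual allocator
 * code) of how IEMNATIVE_REG_FIXED_MASK is meant to be used: any host GPR
 * whose bit is set in the mask must never be handed out as a temporary:
 *
 *      Assert(!(RT_BIT_32(idxHstReg) & IEMNATIVE_REG_FIXED_MASK));
 */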

/** @name Call related registers.
 * @{ */
/** @def IEMNATIVE_CALL_RET_GREG
 * The return value register. */
/** @def IEMNATIVE_CALL_ARG_GREG_COUNT
 * Number of arguments in registers. */
/** @def IEMNATIVE_CALL_ARG0_GREG
 * The general purpose register carrying argument \#0. */
/** @def IEMNATIVE_CALL_ARG1_GREG
 * The general purpose register carrying argument \#1. */
/** @def IEMNATIVE_CALL_ARG2_GREG
 * The general purpose register carrying argument \#2. */
/** @def IEMNATIVE_CALL_ARG3_GREG
 * The general purpose register carrying argument \#3. */
/** @def IEMNATIVE_CALL_VOLATILE_GREG_MASK
 * Mask of registers the callee will not save and may trash. */
#ifdef RT_ARCH_AMD64
# define IEMNATIVE_CALL_RET_GREG            X86_GREG_xAX

# ifdef RT_OS_WINDOWS
#  define IEMNATIVE_CALL_ARG_GREG_COUNT     4
#  define IEMNATIVE_CALL_ARG0_GREG          X86_GREG_xCX
#  define IEMNATIVE_CALL_ARG1_GREG          X86_GREG_xDX
#  define IEMNATIVE_CALL_ARG2_GREG          X86_GREG_x8
#  define IEMNATIVE_CALL_ARG3_GREG          X86_GREG_x9
#  define IEMNATIVE_CALL_ARGS_GREG_MASK     (  RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) \
                                             | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) \
                                             | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG) \
                                             | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) )
#  define IEMNATIVE_CALL_VOLATILE_GREG_MASK (  RT_BIT_32(X86_GREG_xAX) \
                                             | RT_BIT_32(X86_GREG_xCX) \
                                             | RT_BIT_32(X86_GREG_xDX) \
                                             | RT_BIT_32(X86_GREG_x8) \
                                             | RT_BIT_32(X86_GREG_x9) \
                                             | RT_BIT_32(X86_GREG_x10) \
                                             | RT_BIT_32(X86_GREG_x11) )
#  ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/* xmm0 - xmm5 are marked as volatile. */
#   define IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK (UINT32_C(0x3f))
#  endif

# else /* !RT_OS_WINDOWS */
#  define IEMNATIVE_CALL_ARG_GREG_COUNT     6
#  define IEMNATIVE_CALL_ARG0_GREG          X86_GREG_xDI
#  define IEMNATIVE_CALL_ARG1_GREG          X86_GREG_xSI
#  define IEMNATIVE_CALL_ARG2_GREG          X86_GREG_xDX
#  define IEMNATIVE_CALL_ARG3_GREG          X86_GREG_xCX
#  define IEMNATIVE_CALL_ARG4_GREG          X86_GREG_x8
#  define IEMNATIVE_CALL_ARG5_GREG          X86_GREG_x9
#  define IEMNATIVE_CALL_ARGS_GREG_MASK     (  RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) \
                                             | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) \
                                             | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG) \
                                             | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) \
                                             | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG) \
                                             | RT_BIT_32(IEMNATIVE_CALL_ARG5_GREG) )
#  define IEMNATIVE_CALL_VOLATILE_GREG_MASK (  RT_BIT_32(X86_GREG_xAX) \
                                             | RT_BIT_32(X86_GREG_xCX) \
                                             | RT_BIT_32(X86_GREG_xDX) \
                                             | RT_BIT_32(X86_GREG_xDI) \
                                             | RT_BIT_32(X86_GREG_xSI) \
                                             | RT_BIT_32(X86_GREG_x8) \
                                             | RT_BIT_32(X86_GREG_x9) \
                                             | RT_BIT_32(X86_GREG_x10) \
                                             | RT_BIT_32(X86_GREG_x11) )
#  ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/* xmm0 - xmm15 are marked as volatile. */
#   define IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK (UINT32_C(0xffff))
#  endif
# endif /* !RT_OS_WINDOWS */

#elif defined(RT_ARCH_ARM64)
# define IEMNATIVE_CALL_RET_GREG            ARMV8_A64_REG_X0
# define IEMNATIVE_CALL_ARG_GREG_COUNT      8
# define IEMNATIVE_CALL_ARG0_GREG           ARMV8_A64_REG_X0
# define IEMNATIVE_CALL_ARG1_GREG           ARMV8_A64_REG_X1
# define IEMNATIVE_CALL_ARG2_GREG           ARMV8_A64_REG_X2
# define IEMNATIVE_CALL_ARG3_GREG           ARMV8_A64_REG_X3
# define IEMNATIVE_CALL_ARG4_GREG           ARMV8_A64_REG_X4
# define IEMNATIVE_CALL_ARG5_GREG           ARMV8_A64_REG_X5
# define IEMNATIVE_CALL_ARG6_GREG           ARMV8_A64_REG_X6
# define IEMNATIVE_CALL_ARG7_GREG           ARMV8_A64_REG_X7
# define IEMNATIVE_CALL_ARGS_GREG_MASK      (  RT_BIT_32(ARMV8_A64_REG_X0) \
                                             | RT_BIT_32(ARMV8_A64_REG_X1) \
                                             | RT_BIT_32(ARMV8_A64_REG_X2) \
                                             | RT_BIT_32(ARMV8_A64_REG_X3) \
                                             | RT_BIT_32(ARMV8_A64_REG_X4) \
                                             | RT_BIT_32(ARMV8_A64_REG_X5) \
                                             | RT_BIT_32(ARMV8_A64_REG_X6) \
                                             | RT_BIT_32(ARMV8_A64_REG_X7) )
# define IEMNATIVE_CALL_VOLATILE_GREG_MASK  (  RT_BIT_32(ARMV8_A64_REG_X0) \
                                             | RT_BIT_32(ARMV8_A64_REG_X1) \
                                             | RT_BIT_32(ARMV8_A64_REG_X2) \
                                             | RT_BIT_32(ARMV8_A64_REG_X3) \
                                             | RT_BIT_32(ARMV8_A64_REG_X4) \
                                             | RT_BIT_32(ARMV8_A64_REG_X5) \
                                             | RT_BIT_32(ARMV8_A64_REG_X6) \
                                             | RT_BIT_32(ARMV8_A64_REG_X7) \
                                             | RT_BIT_32(ARMV8_A64_REG_X8) \
                                             | RT_BIT_32(ARMV8_A64_REG_X9) \
                                             | RT_BIT_32(ARMV8_A64_REG_X10) \
                                             | RT_BIT_32(ARMV8_A64_REG_X11) \
                                             | RT_BIT_32(ARMV8_A64_REG_X12) \
                                             | RT_BIT_32(ARMV8_A64_REG_X13) \
                                             | RT_BIT_32(ARMV8_A64_REG_X14) \
                                             | RT_BIT_32(ARMV8_A64_REG_X15) \
                                             | RT_BIT_32(ARMV8_A64_REG_X16) \
                                             | RT_BIT_32(ARMV8_A64_REG_X17) )
# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/* The low 64 bits of v8 - v15 are marked as callee saved but the rest is volatile,
 * so to simplify our life a bit we just mark everything as volatile. */
#  define IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK (UINT32_C(0xffffffff))
# endif

#endif

/** This is the maximum argument count we'll ever need. */
#define IEMNATIVE_CALL_MAX_ARG_COUNT        7
#ifdef RT_OS_WINDOWS
# ifdef VBOXSTRICTRC_STRICT_ENABLED
#  undef IEMNATIVE_CALL_MAX_ARG_COUNT
#  define IEMNATIVE_CALL_MAX_ARG_COUNT      8
# endif
#endif
/** @} */
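
/*
 * Example: An illustrative sketch (hypothetical table, not the one actually
 * used by the recompiler) of how the IEMNATIVE_CALL_ARGn_GREG defines can be
 * turned into an indexable lookup when emitting helper calls; only the first
 * four argument registers are shown since those exist on every supported host:
 *
 *      static uint8_t const s_aidxExampleArgRegs[] =
 *      {
 *          IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_CALL_ARG1_GREG,
 *          IEMNATIVE_CALL_ARG2_GREG, IEMNATIVE_CALL_ARG3_GREG,
 *      };
 *      AssertCompile(RT_ELEMENTS(s_aidxExampleArgRegs) <= IEMNATIVE_CALL_ARG_GREG_COUNT);
 */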


/** @def IEMNATIVE_HST_GREG_COUNT
 * Number of host general purpose registers we track. */
/** @def IEMNATIVE_HST_GREG_MASK
 * Mask corresponding to IEMNATIVE_HST_GREG_COUNT that can be applied to
 * inverted register masks and such to get down to a correct set of regs. */
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/** @def IEMNATIVE_HST_SIMD_REG_COUNT
 * Number of host SIMD registers we track. */
/** @def IEMNATIVE_HST_SIMD_REG_MASK
 * Mask corresponding to IEMNATIVE_HST_SIMD_REG_COUNT that can be applied to
 * inverted register masks and such to get down to a correct set of regs. */
#endif
#ifdef RT_ARCH_AMD64
# define IEMNATIVE_HST_GREG_COUNT           16
# define IEMNATIVE_HST_GREG_MASK            UINT32_C(0xffff)

# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
#  define IEMNATIVE_HST_SIMD_REG_COUNT      16
#  define IEMNATIVE_HST_SIMD_REG_MASK       UINT32_C(0xffff)
# endif

#elif defined(RT_ARCH_ARM64)
# define IEMNATIVE_HST_GREG_COUNT           32
# define IEMNATIVE_HST_GREG_MASK            UINT32_MAX

# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
#  define IEMNATIVE_HST_SIMD_REG_COUNT      32
#  define IEMNATIVE_HST_SIMD_REG_MASK       UINT32_MAX
# endif

#else
# error "Port me!"
#endif
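
/*
 * Example: An illustrative snippet (for exposition only) combining the host
 * register mask above with the fixed register mask from further up to arrive
 * at the set of GPRs the register allocator may actually hand out:
 *
 *      uint32_t const fAllocatableGprs = IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK;
 */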


#ifndef RT_IN_ASSEMBLER /* ASM-NOINC-START - the rest of the file */


/** Native code generator label types. */
typedef enum
{
    kIemNativeLabelType_Invalid = 0,
    /*
     * Labels w/o data, only one instance per TB.
     *
     * Note! Jumps to these require instructions that are capable of spanning
     *       the max TB length.
     */
    /* Simple labels come first for indexing reasons. The RaiseXx labels are ordered by the exception's numerical value(s). */
    kIemNativeLabelType_RaiseDe,                /**< Raise (throw) X86_XCPT_DE (00h). */
    kIemNativeLabelType_RaiseUd,                /**< Raise (throw) X86_XCPT_UD (06h). */
    kIemNativeLabelType_RaiseSseRelated,        /**< Raise (throw) X86_XCPT_UD or X86_XCPT_NM according to cr0 & cr4. */
    kIemNativeLabelType_RaiseAvxRelated,        /**< Raise (throw) X86_XCPT_UD or X86_XCPT_NM according to xcr0, cr0 & cr4. */
    kIemNativeLabelType_RaiseSseAvxFpRelated,   /**< Raise (throw) X86_XCPT_UD or X86_XCPT_XF according to cr4. */
    kIemNativeLabelType_RaiseNm,                /**< Raise (throw) X86_XCPT_NM (07h). */
    kIemNativeLabelType_RaiseGp0,               /**< Raise (throw) X86_XCPT_GP (0dh) w/ errcd=0. */
    kIemNativeLabelType_RaiseMf,                /**< Raise (throw) X86_XCPT_MF (10h). */
    kIemNativeLabelType_RaiseXf,                /**< Raise (throw) X86_XCPT_XF (13h). */
    kIemNativeLabelType_ObsoleteTb,
    kIemNativeLabelType_NeedCsLimChecking,
    kIemNativeLabelType_CheckBranchMiss,
    kIemNativeLabelType_LastSimple = kIemNativeLabelType_CheckBranchMiss,
    /* Manually defined labels. */
    kIemNativeLabelType_Return,
    kIemNativeLabelType_ReturnBreak,
    kIemNativeLabelType_ReturnBreakFF,
    kIemNativeLabelType_ReturnWithFlags,
    kIemNativeLabelType_NonZeroRetOrPassUp,
    /** The last fixup for branches that can span almost the whole TB length. */
    kIemNativeLabelType_LastWholeTbBranch = kIemNativeLabelType_NonZeroRetOrPassUp,

    /*
     * Labels with data, potentially multiple instances per TB:
     *
     * These are localized labels, so no fixed jump type restrictions here.
     */
    kIemNativeLabelType_FirstWithMultipleInstances,
    kIemNativeLabelType_If = kIemNativeLabelType_FirstWithMultipleInstances,
    kIemNativeLabelType_Else,
    kIemNativeLabelType_Endif,
    kIemNativeLabelType_CheckIrq,
    kIemNativeLabelType_TlbLookup,
    kIemNativeLabelType_TlbMiss,
    kIemNativeLabelType_TlbDone,
    kIemNativeLabelType_End
} IEMNATIVELABELTYPE;

/** Native code generator label definition. */
typedef struct IEMNATIVELABEL
{
    /** Code offset if defined, UINT32_MAX if it needs to be generated after/in
     * the epilog. */
    uint32_t    off;
    /** The type of label (IEMNATIVELABELTYPE). */
    uint16_t    enmType;
    /** Additional label data, type specific. */
    uint16_t    uData;
} IEMNATIVELABEL;
/** Pointer to a label. */
typedef IEMNATIVELABEL *PIEMNATIVELABEL;


/** Native code generator fixup types. */
typedef enum
{
    kIemNativeFixupType_Invalid = 0,
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    /** AMD64 fixup: PC relative 32-bit with addend in offAddend. */
    kIemNativeFixupType_Rel32,
#elif defined(RT_ARCH_ARM64)
    /** ARM64 fixup: PC relative offset at bits 25:0 (B, BL). */
    kIemNativeFixupType_RelImm26At0,
    /** ARM64 fixup: PC relative offset at bits 23:5 (CBZ, CBNZ, B.CC). */
    kIemNativeFixupType_RelImm19At5,
    /** ARM64 fixup: PC relative offset at bits 18:5 (TBZ, TBNZ). */
    kIemNativeFixupType_RelImm14At5,
#endif
    kIemNativeFixupType_End
} IEMNATIVEFIXUPTYPE;

/** Native code generator fixup. */
typedef struct IEMNATIVEFIXUP
{
    /** Code offset of the fixup location. */
    uint32_t    off;
    /** The IEMNATIVELABEL this is a fixup for. */
    uint16_t    idxLabel;
    /** The fixup type (IEMNATIVEFIXUPTYPE). */
    uint8_t     enmType;
    /** Addend or other data. */
    int8_t      offAddend;
} IEMNATIVEFIXUP;
/** Pointer to a native code generator fixup. */
typedef IEMNATIVEFIXUP *PIEMNATIVEFIXUP;
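
/*
 * Example: An illustrative sketch (hypothetical helper, not the actual label
 * linking code in the recompiler) of how a kIemNativeFixupType_Rel32 record
 * could be resolved on AMD64 once the label's code offset is known.  The
 * 32-bit displacement is taken relative to the end of the 4-byte field being
 * patched, with IEMNATIVEFIXUP::offAddend added on top:
 *
 *      static void iemNativeExampleApplyRel32(uint8_t *pbCode, IEMNATIVEFIXUP const *pFixup, uint32_t offLabel)
 *      {
 *          int32_t const offDisp = (int32_t)offLabel - (int32_t)(pFixup->off + 4) + pFixup->offAddend;
 *          memcpy(&pbCode[pFixup->off], &offDisp, sizeof(offDisp));
 *      }
 */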


/**
 * One bit of the state.
 *
 * Each register state takes up two bits.  We keep the two bits in two separate
 * 64-bit words to simplify applying them to the guest shadow register mask in
 * the register allocator.
 */
typedef union IEMLIVENESSBIT
{
    uint64_t    bm64;
    RT_GCC_EXTENSION struct
    {                                       /* bit no */
        uint64_t    bmGprs      : 16;       /**< 0x00 /  0: The 16 general purpose registers. */
        uint64_t    fUnusedPc   : 1;        /**< 0x10 / 16: (PC in ) */
        uint64_t    fCr0        : 1;        /**< 0x11 / 17: */
        uint64_t    fFcw        : 1;        /**< 0x12 / 18: */
        uint64_t    fFsw        : 1;        /**< 0x13 / 19: */
        uint64_t    bmSegBase   : 6;        /**< 0x14 / 20: */
        uint64_t    bmSegAttrib : 6;        /**< 0x1a / 26: */
        uint64_t    bmSegLimit  : 6;        /**< 0x20 / 32: */
        uint64_t    bmSegSel    : 6;        /**< 0x26 / 38: */
        uint64_t    fCr4        : 1;        /**< 0x2c / 44: */
        uint64_t    fXcr0       : 1;        /**< 0x2d / 45: */
        uint64_t    fMxCsr      : 1;        /**< 0x2e / 46: */
        uint64_t    fEflOther   : 1;        /**< 0x2f / 47: Other EFLAGS bits (~X86_EFL_STATUS_BITS & X86_EFL_LIVE_MASK).  First! */
        uint64_t    fEflCf      : 1;        /**< 0x30 / 48: Carry flag (X86_EFL_CF / 0). */
        uint64_t    fEflPf      : 1;        /**< 0x31 / 49: Parity flag (X86_EFL_PF / 2). */
        uint64_t    fEflAf      : 1;        /**< 0x32 / 50: Auxiliary carry flag (X86_EFL_AF / 4). */
        uint64_t    fEflZf      : 1;        /**< 0x33 / 51: Zero flag (X86_EFL_ZF / 6). */
        uint64_t    fEflSf      : 1;        /**< 0x34 / 52: Sign flag (X86_EFL_SF / 7). */
        uint64_t    fEflOf      : 1;        /**< 0x35 / 53: Overflow flag (X86_EFL_OF / 12). */
        uint64_t    uUnused     : 10;       /*   0x36 / 54 -> 0x40/64 */
    };
} IEMLIVENESSBIT;
AssertCompileSize(IEMLIVENESSBIT, 8);

#define IEMLIVENESSBIT_IDX_EFL_OTHER        ((unsigned)kIemNativeGstReg_EFlags + 0)
#define IEMLIVENESSBIT_IDX_EFL_CF           ((unsigned)kIemNativeGstReg_EFlags + 1)
#define IEMLIVENESSBIT_IDX_EFL_PF           ((unsigned)kIemNativeGstReg_EFlags + 2)
#define IEMLIVENESSBIT_IDX_EFL_AF           ((unsigned)kIemNativeGstReg_EFlags + 3)
#define IEMLIVENESSBIT_IDX_EFL_ZF           ((unsigned)kIemNativeGstReg_EFlags + 4)
#define IEMLIVENESSBIT_IDX_EFL_SF           ((unsigned)kIemNativeGstReg_EFlags + 5)
#define IEMLIVENESSBIT_IDX_EFL_OF           ((unsigned)kIemNativeGstReg_EFlags + 6)


/**
 * A liveness state entry.
 *
 * The first 128 bits run parallel to kIemNativeGstReg_xxx for the most part.
 * Once we add SSE register shadowing, we'll add another 64-bit element for
 * that.
 */
typedef union IEMLIVENESSENTRY
{
#ifndef IEMLIVENESS_EXTENDED_LAYOUT
    uint64_t        bm64[16 / 8];
    uint32_t        bm32[16 / 4];
    uint16_t        bm16[16 / 2];
    uint8_t         bm8[ 16 / 1];
    IEMLIVENESSBIT  aBits[2];
#else
    uint64_t        bm64[32 / 8];
    uint32_t        bm32[32 / 4];
    uint16_t        bm16[32 / 2];
    uint8_t         bm8[ 32 / 1];
    IEMLIVENESSBIT  aBits[4];
#endif
    RT_GCC_EXTENSION struct
    {
        /** Bit \#0 of the register states. */
        IEMLIVENESSBIT Bit0;
        /** Bit \#1 of the register states. */
        IEMLIVENESSBIT Bit1;
#ifdef IEMLIVENESS_EXTENDED_LAYOUT
        /** Bit \#2 of the register states. */
        IEMLIVENESSBIT Bit2;
        /** Bit \#3 of the register states. */
        IEMLIVENESSBIT Bit3;
#endif
    };
} IEMLIVENESSENTRY;
#ifndef IEMLIVENESS_EXTENDED_LAYOUT
AssertCompileSize(IEMLIVENESSENTRY, 16);
#else
AssertCompileSize(IEMLIVENESSENTRY, 32);
#endif
/** Pointer to a liveness state entry. */
typedef IEMLIVENESSENTRY *PIEMLIVENESSENTRY;
/** Pointer to a const liveness state entry. */
typedef IEMLIVENESSENTRY const *PCIEMLIVENESSENTRY;

/** @name 64-bit value masks for IEMLIVENESSENTRY.
 * @{ */                                                /* 0xzzzzyyyyxxxxwwww */
#define IEMLIVENESSBIT_MASK                     UINT64_C(0x003ffffffffeffff)

#ifndef IEMLIVENESS_EXTENDED_LAYOUT
# define IEMLIVENESSBIT0_XCPT_OR_CALL           UINT64_C(0x0000000000000000)
# define IEMLIVENESSBIT1_XCPT_OR_CALL           IEMLIVENESSBIT_MASK

# define IEMLIVENESSBIT0_ALL_UNUSED             IEMLIVENESSBIT_MASK
# define IEMLIVENESSBIT1_ALL_UNUSED             UINT64_C(0x0000000000000000)
#endif

#define IEMLIVENESSBIT_ALL_EFL_MASK             UINT64_C(0x003f800000000000)
#define IEMLIVENESSBIT_STATUS_EFL_MASK          UINT64_C(0x003f000000000000)

#ifndef IEMLIVENESS_EXTENDED_LAYOUT
# define IEMLIVENESSBIT0_ALL_EFL_INPUT          IEMLIVENESSBIT_ALL_EFL_MASK
# define IEMLIVENESSBIT1_ALL_EFL_INPUT          IEMLIVENESSBIT_ALL_EFL_MASK
#endif
/** @} */


/** @name The liveness state for a register.
 *
 * The state values have been picked with state accumulation in mind (what the
 * iemNativeLivenessFunc_xxxx functions do), as that is the most performance
 * critical work done with the values.
 *
 * This is a compressed state that only requires 2 bits per register.
 * When accumulating state, we'll be using three IEMLIVENESSENTRY copies:
 *      1. the incoming state from the following call,
 *      2. the outgoing state for this call,
 *      3. mask of the entries set in the 2nd.
 *
 * The mask entry (3rd one above) will be used both when updating the outgoing
 * state and when merging in incoming state for registers not touched by the
 * current call.
 *
 * @{ */
#ifndef IEMLIVENESS_EXTENDED_LAYOUT
/** The register will be clobbered and the current value thrown away.
 *
 * When this is applied to the state (2) we'll simply be AND'ing it with the
 * (old) mask (3) and adding the register to the mask.  This way we'll
 * preserve the high priority IEMLIVENESS_STATE_XCPT_OR_CALL and
 * IEMLIVENESS_STATE_INPUT states. */
# define IEMLIVENESS_STATE_CLOBBERED        0
/** The register is unused in the remainder of the TB.
 *
 * This is an initial state and cannot be set by any of the
 * iemNativeLivenessFunc_xxxx callbacks. */
# define IEMLIVENESS_STATE_UNUSED           1
/** The register value is required in a potential call or exception.
 *
 * This means that the register value must be calculated and is best written to
 * the state, but that any shadowing registers can be flushed thereafter as it's
 * not used again.  This state has lower priority than IEMLIVENESS_STATE_INPUT.
 *
 * It is typically applied across the board, but we preserve incoming
 * IEMLIVENESS_STATE_INPUT values.  This latter means we have to do some extra
 * trickery to filter out IEMLIVENESS_STATE_UNUSED:
 *      1. r0 = old & ~mask;
 *      2. r0 = r0 & (r0 >> 1);
 *      3. state |= r0 | 0b10;
 *      4. mask = ~0;
 */
# define IEMLIVENESS_STATE_XCPT_OR_CALL     2
/** The register value is used as input.
 *
 * This means that the register value must be calculated and it is best to keep
 * it in a register.  It does not need to be written out as such.  This is the
 * highest priority state.
 *
 * Whether the call modifies the register or not isn't relevant to earlier
 * calls, so that's not recorded.
 *
 * When applying this state we just OR in the value in the outgoing state and
 * mask. */
# define IEMLIVENESS_STATE_INPUT            3
/** Mask of the state bits. */
# define IEMLIVENESS_STATE_MASK             3
/** The number of bits per state. */
# define IEMLIVENESS_STATE_BIT_COUNT        2
/** Check if we're expecting read & write accesses to a register with the given (previous) liveness state. */
# define IEMLIVENESS_STATE_IS_MODIFY_EXPECTED(a_uState)  ((uint32_t)((a_uState) - 1U) >= (uint32_t)(IEMLIVENESS_STATE_INPUT - 1U))
/** Check if we're expecting read accesses to a register with the given (previous) liveness state. */
# define IEMLIVENESS_STATE_IS_INPUT_EXPECTED(a_uState)   IEMLIVENESS_STATE_IS_MODIFY_EXPECTED(a_uState)
/** Check if a register clobbering is expected given the (previous) liveness state.
 * The state must be either CLOBBERED or XCPT_OR_CALL, but it may also
 * include INPUT if the register is used in more than one place. */
# define IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(a_uState) ((uint32_t)(a_uState) != IEMLIVENESS_STATE_UNUSED)

/** Check if all status flags are going to be clobbered and don't need
 * calculating in the current step.
 * @param a_pCurEntry  The current liveness entry. */
# define IEMLIVENESS_STATE_ARE_STATUS_EFL_TO_BE_CLOBBERED(a_pCurEntry) \
    ( (((a_pCurEntry)->Bit0.bm64 | (a_pCurEntry)->Bit1.bm64) & IEMLIVENESSBIT_STATUS_EFL_MASK) == 0 )

#else  /* IEMLIVENESS_EXTENDED_LAYOUT */
/** The register is not used any more. */
# define IEMLIVENESS_STATE_UNUSED           0
/** Flag: The register is required in a potential exception or call. */
# define IEMLIVENESS_STATE_POT_XCPT_OR_CALL 1
# define IEMLIVENESS_BIT_POT_XCPT_OR_CALL   0
/** Flag: The register is read. */
# define IEMLIVENESS_STATE_READ             2
# define IEMLIVENESS_BIT_READ               1
/** Flag: The register is written. */
# define IEMLIVENESS_STATE_WRITE            4
# define IEMLIVENESS_BIT_WRITE              2
/** Flag: Unconditional call (not needed, can be redefined for research). */
# define IEMLIVENESS_STATE_CALL             8
# define IEMLIVENESS_BIT_CALL               3
# define IEMLIVENESS_BIT_OTHER              3   /**< More convenient name for this one. */
# define IEMLIVENESS_STATE_IS_MODIFY_EXPECTED(a_uState) \
    ( ((a_uState) & (IEMLIVENESS_STATE_WRITE | IEMLIVENESS_STATE_READ)) == (IEMLIVENESS_STATE_WRITE | IEMLIVENESS_STATE_READ) )
# define IEMLIVENESS_STATE_IS_INPUT_EXPECTED(a_uState)   RT_BOOL((a_uState) & IEMLIVENESS_STATE_READ)
# define IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(a_uState) RT_BOOL((a_uState) & IEMLIVENESS_STATE_WRITE)

# define IEMLIVENESS_STATE_ARE_STATUS_EFL_TO_BE_CLOBBERED(a_pCurEntry) \
    (    ((a_pCurEntry)->aBits[IEMLIVENESS_BIT_WRITE].bm64 & IEMLIVENESSBIT_STATUS_EFL_MASK) == IEMLIVENESSBIT_STATUS_EFL_MASK \
      && !(   ((a_pCurEntry)->aBits[IEMLIVENESS_BIT_READ].bm64 | (a_pCurEntry)->aBits[IEMLIVENESS_BIT_POT_XCPT_OR_CALL].bm64) \
            & IEMLIVENESSBIT_STATUS_EFL_MASK) )

#endif /* IEMLIVENESS_EXTENDED_LAYOUT */
/** @} */
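
/*
 * Example: An illustrative sketch (for exposition only) of what the numbered
 * XCPT_OR_CALL steps above boil down to for the compressed two-bit layout when
 * expressed on the two 64-bit state words; this mirrors what the
 * IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL macro further down does:
 *
 *  #ifndef IEMLIVENESS_EXTENDED_LAYOUT
 *      static void iemNativeExampleInitWithXcptOrCall(PIEMLIVENESSENTRY pOutgoing, PCIEMLIVENESSENTRY pIncoming)
 *      {
 *          // Bit0 survives only where the incoming state was INPUT (0b11), i.e. r0 & (r0 >> 1) per register.
 *          pOutgoing->Bit0.bm64 = pIncoming->Bit0.bm64 & pIncoming->Bit1.bm64;
 *          // Bit1 is set for every register, i.e. the '| 0b10' part of step 3.
 *          pOutgoing->Bit1.bm64 = IEMLIVENESSBIT1_XCPT_OR_CALL;
 *      }
 *  #endif
 */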

/** @name Liveness helpers for builtin functions and similar.
 *
 * These are not used by IEM_MC_BEGIN/END blocks, IEMAllN8veLiveness.cpp has its
 * own set of manipulator macros for those.
 *
 * @{ */
/** Initializing the state as all unused. */
#ifndef IEMLIVENESS_EXTENDED_LAYOUT
# define IEM_LIVENESS_RAW_INIT_AS_UNUSED(a_pOutgoing) \
    do { \
        (a_pOutgoing)->Bit0.bm64 = IEMLIVENESSBIT0_ALL_UNUSED; \
        (a_pOutgoing)->Bit1.bm64 = IEMLIVENESSBIT1_ALL_UNUSED; \
    } while (0)
#else
# define IEM_LIVENESS_RAW_INIT_AS_UNUSED(a_pOutgoing) \
    do { \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_POT_XCPT_OR_CALL].bm64 = 0; \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_READ            ].bm64 = 0; \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_WRITE           ].bm64 = 0; \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_OTHER           ].bm64 = 0; \
    } while (0)
#endif

/** Initializing the outgoing state with a potential xcpt or call state.
 * This only works when all later changes will be IEMLIVENESS_STATE_INPUT. */
#ifndef IEMLIVENESS_EXTENDED_LAYOUT
# define IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(a_pOutgoing, a_pIncoming) \
    do { \
        (a_pOutgoing)->Bit0.bm64 = (a_pIncoming)->Bit0.bm64 & (a_pIncoming)->Bit1.bm64; \
        (a_pOutgoing)->Bit1.bm64 = IEMLIVENESSBIT1_XCPT_OR_CALL; \
    } while (0)
#else
# define IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(a_pOutgoing, a_pIncoming) \
    do { \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_POT_XCPT_OR_CALL].bm64 = IEMLIVENESSBIT_MASK; \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_READ            ].bm64 = (a_pIncoming)->aBits[IEMLIVENESS_BIT_READ].bm64; \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_WRITE           ].bm64 = 0; \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_OTHER           ].bm64 = 0; \
    } while (0)
#endif

/** Adds a segment base register as input to the outgoing state. */
#ifndef IEMLIVENESS_EXTENDED_LAYOUT
# define IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, a_iSReg) do { \
        (a_pOutgoing)->Bit0.bmSegBase |= RT_BIT_64(a_iSReg); \
        (a_pOutgoing)->Bit1.bmSegBase |= RT_BIT_64(a_iSReg); \
    } while (0)
#else
# define IEM_LIVENESS_RAW_SEG_BASE_INPUT(a_pOutgoing, a_iSReg) do { \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_READ].bmSegBase |= RT_BIT_64(a_iSReg); \
    } while (0)
#endif

/** Adds a segment attribute register as input to the outgoing state. */
#ifndef IEMLIVENESS_EXTENDED_LAYOUT
# define IEM_LIVENESS_RAW_SEG_ATTRIB_INPUT(a_pOutgoing, a_iSReg) do { \
        (a_pOutgoing)->Bit0.bmSegAttrib |= RT_BIT_64(a_iSReg); \
        (a_pOutgoing)->Bit1.bmSegAttrib |= RT_BIT_64(a_iSReg); \
    } while (0)
#else
# define IEM_LIVENESS_RAW_SEG_ATTRIB_INPUT(a_pOutgoing, a_iSReg) do { \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_READ].bmSegAttrib |= RT_BIT_64(a_iSReg); \
    } while (0)
#endif

/** Adds a segment limit register as input to the outgoing state. */
#ifndef IEMLIVENESS_EXTENDED_LAYOUT
# define IEM_LIVENESS_RAW_SEG_LIMIT_INPUT(a_pOutgoing, a_iSReg) do { \
        (a_pOutgoing)->Bit0.bmSegLimit |= RT_BIT_64(a_iSReg); \
        (a_pOutgoing)->Bit1.bmSegLimit |= RT_BIT_64(a_iSReg); \
    } while (0)
#else
# define IEM_LIVENESS_RAW_SEG_LIMIT_INPUT(a_pOutgoing, a_iSReg) do { \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_READ].bmSegLimit |= RT_BIT_64(a_iSReg); \
    } while (0)
#endif

/** Adds an EFLAGS bit as input to the outgoing state. */
#ifndef IEMLIVENESS_EXTENDED_LAYOUT
# define IEM_LIVENESS_RAW_EFLAGS_ONE_INPUT(a_pOutgoing, a_fEflMember) do { \
        (a_pOutgoing)->Bit0.a_fEflMember |= 1; \
        (a_pOutgoing)->Bit1.a_fEflMember |= 1; \
    } while (0)
#else
# define IEM_LIVENESS_RAW_EFLAGS_ONE_INPUT(a_pOutgoing, a_fEflMember) do { \
        (a_pOutgoing)->aBits[IEMLIVENESS_BIT_READ].a_fEflMember |= 1; \
    } while (0)
#endif
/** @} */
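
/*
 * Example: An illustrative sketch (hypothetical function, not one of the
 * actual builtin liveness handlers) of how the raw helpers above are meant to
 * be combined for a builtin that may raise an exception, reads the ES base and
 * inspects the carry flag:
 *
 *      static void iemNativeExampleLivenessBuiltin(PIEMLIVENESSENTRY pOutgoing, PCIEMLIVENESSENTRY pIncoming)
 *      {
 *          IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(pOutgoing, pIncoming);
 *          IEM_LIVENESS_RAW_SEG_BASE_INPUT(pOutgoing, X86_SREG_ES);
 *          IEM_LIVENESS_RAW_EFLAGS_ONE_INPUT(pOutgoing, fEflCf);
 *      }
 */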

/** @def IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK
 * Checks that the EFLAGS bits specified by @a a_fEflNeeded are actually
 * calculated and up to date.  This is to double check that we haven't skipped
 * EFLAGS calculations when we actually need them.  NOP in non-strict builds.
 * @note has to be placed in
 */
#ifdef IEMNATIVE_STRICT_EFLAGS_SKIPPING
# define IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(a_pReNative, a_off, a_fEflNeeded) \
    do { (a_off) = iemNativeEmitEFlagsSkippingCheck(a_pReNative, a_off, a_fEflNeeded); } while (0)
#else
# define IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(a_pReNative, a_off, a_fEflNeeded) do { } while (0)
#endif


/**
 * Guest registers that can be shadowed in GPRs.
 *
 * This runs parallel to the liveness state (IEMLIVENESSBIT, ++).  The EFlags
 * must be placed last, as the liveness state tracks it as 7 subcomponents and
 * we don't want to waste space here.
 *
 * @note Make sure to update IEMLIVENESSBIT, IEMLIVENESSBIT_ALL_EFL_MASK and
 *       friends as well as IEMAllN8veLiveness.cpp.
 */
typedef enum IEMNATIVEGSTREG : uint8_t
{
    kIemNativeGstReg_GprFirst      = 0,
    kIemNativeGstReg_GprLast       = kIemNativeGstReg_GprFirst + 15,
    kIemNativeGstReg_Pc,
    kIemNativeGstReg_Cr0,
    kIemNativeGstReg_FpuFcw,
    kIemNativeGstReg_FpuFsw,
    kIemNativeGstReg_SegBaseFirst,
    kIemNativeGstReg_SegBaseLast   = kIemNativeGstReg_SegBaseFirst + 5,
    kIemNativeGstReg_SegAttribFirst,
    kIemNativeGstReg_SegAttribLast = kIemNativeGstReg_SegAttribFirst + 5,
    kIemNativeGstReg_SegLimitFirst,
    kIemNativeGstReg_SegLimitLast  = kIemNativeGstReg_SegLimitFirst + 5,
    kIemNativeGstReg_SegSelFirst,
    kIemNativeGstReg_SegSelLast    = kIemNativeGstReg_SegSelFirst + 5,
    kIemNativeGstReg_Cr4,
    kIemNativeGstReg_Xcr0,
    kIemNativeGstReg_MxCsr,
    kIemNativeGstReg_EFlags,        /**< 32-bit, includes internal flags - last! */
    kIemNativeGstReg_End
} IEMNATIVEGSTREG;
AssertCompile((int)kIemNativeGstReg_SegLimitFirst == 32);
AssertCompile((UINT64_C(0x7f) << kIemNativeGstReg_EFlags) == IEMLIVENESSBIT_ALL_EFL_MASK);

/** @name Helpers for converting register numbers to IEMNATIVEGSTREG values.
 * @{ */
#define IEMNATIVEGSTREG_GPR(a_iGpr)             ((IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst       + (a_iGpr)   ))
#define IEMNATIVEGSTREG_SEG_SEL(a_iSegReg)      ((IEMNATIVEGSTREG)(kIemNativeGstReg_SegSelFirst    + (a_iSegReg)))
#define IEMNATIVEGSTREG_SEG_BASE(a_iSegReg)     ((IEMNATIVEGSTREG)(kIemNativeGstReg_SegBaseFirst   + (a_iSegReg)))
#define IEMNATIVEGSTREG_SEG_LIMIT(a_iSegReg)    ((IEMNATIVEGSTREG)(kIemNativeGstReg_SegLimitFirst  + (a_iSegReg)))
#define IEMNATIVEGSTREG_SEG_ATTRIB(a_iSegReg)   ((IEMNATIVEGSTREG)(kIemNativeGstReg_SegAttribFirst + (a_iSegReg)))
/** @} */
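
/*
 * Example: An illustrative snippet (for exposition only) showing how the
 * converters above are used together with RT_BIT_64 to build guest register
 * shadow masks, here for the RSI general purpose register and the FS segment
 * base:
 *
 *      uint64_t const fGstShadows = RT_BIT_64(IEMNATIVEGSTREG_GPR(X86_GREG_xSI))
 *                                 | RT_BIT_64(IEMNATIVEGSTREG_SEG_BASE(X86_SREG_FS));
 */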

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR

/**
 * Guest registers that can be shadowed in host SIMD registers.
 *
 * @todo r=aeichner Liveness tracking
 * @todo r=aeichner Given that we can only track xmm/ymm here does this actually make sense?
 */
typedef enum IEMNATIVEGSTSIMDREG : uint8_t
{
    kIemNativeGstSimdReg_SimdRegFirst = 0,
    kIemNativeGstSimdReg_SimdRegLast  = kIemNativeGstSimdReg_SimdRegFirst + 15,
    kIemNativeGstSimdReg_End
} IEMNATIVEGSTSIMDREG;

/** @name Helpers for converting register numbers to IEMNATIVEGSTSIMDREG values.
 * @{ */
#define IEMNATIVEGSTSIMDREG_SIMD(a_iSimdReg)    ((IEMNATIVEGSTSIMDREG)(kIemNativeGstSimdReg_SimdRegFirst + (a_iSimdReg)))
/** @} */

/**
 * The load/store size for a SIMD guest register.
 */
typedef enum IEMNATIVEGSTSIMDREGLDSTSZ : uint8_t
{
    /** Invalid size. */
    kIemNativeGstSimdRegLdStSz_Invalid = 0,
    /** Loads the low 128-bit of a guest SIMD register. */
    kIemNativeGstSimdRegLdStSz_Low128,
    /** Loads the high 128-bit of a guest SIMD register. */
    kIemNativeGstSimdRegLdStSz_High128,
    /** Loads the whole 256-bits of a guest SIMD register. */
    kIemNativeGstSimdRegLdStSz_256,
    /** End value. */
    kIemNativeGstSimdRegLdStSz_End
} IEMNATIVEGSTSIMDREGLDSTSZ;

#endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */

/**
 * Intended use statement for iemNativeRegAllocTmpForGuestReg().
 */
typedef enum IEMNATIVEGSTREGUSE
{
    /** The usage is read-only, the register holding the guest register
     * shadow copy will not be modified by the caller. */
    kIemNativeGstRegUse_ReadOnly = 0,
    /** The caller will update the guest register (think: PC += cbInstr).
     * The guest shadow copy will follow the returned register. */
    kIemNativeGstRegUse_ForUpdate,
    /** The caller will put an entirely new value in the guest register, so
     * if a new register is allocated it will be returned uninitialized. */
    kIemNativeGstRegUse_ForFullWrite,
    /** The caller will use the guest register value as input in a calculation
     * and the host register will be modified.
     * This means that the returned host register will not be marked as a shadow
     * copy of the guest register. */
    kIemNativeGstRegUse_Calculation
} IEMNATIVEGSTREGUSE;

/**
 * Guest registers (classes) that can be referenced.
 */
typedef enum IEMNATIVEGSTREGREF : uint8_t
{
    kIemNativeGstRegRef_Invalid = 0,
    kIemNativeGstRegRef_Gpr,
    kIemNativeGstRegRef_GprHighByte,    /**< AH, CH, DH, BH */
    kIemNativeGstRegRef_EFlags,
    kIemNativeGstRegRef_MxCsr,
    kIemNativeGstRegRef_FpuReg,
    kIemNativeGstRegRef_MReg,
    kIemNativeGstRegRef_XReg,
    kIemNativeGstRegRef_X87,
    kIemNativeGstRegRef_XState,
    //kIemNativeGstRegRef_YReg, - doesn't work.
    kIemNativeGstRegRef_End
} IEMNATIVEGSTREGREF;


/** Variable kinds. */
typedef enum IEMNATIVEVARKIND : uint8_t
{
    /** Customary invalid zero value. */
    kIemNativeVarKind_Invalid = 0,
    /** This is either in a register or on the stack. */
    kIemNativeVarKind_Stack,
    /** Immediate value - loaded into register when needed, or can live on the
     * stack if referenced (in theory). */
    kIemNativeVarKind_Immediate,
    /** Variable reference - loaded into register when needed, never stack. */
    kIemNativeVarKind_VarRef,
    /** Guest register reference - loaded into register when needed, never stack. */
    kIemNativeVarKind_GstRegRef,
    /** End of valid values. */
    kIemNativeVarKind_End
} IEMNATIVEVARKIND;


/** Variable or argument. */
typedef struct IEMNATIVEVAR
{
    /** The kind of variable. */
    IEMNATIVEVARKIND    enmKind;
    /** The variable size in bytes. */
    uint8_t             cbVar;
    /** The first stack slot (uint64_t), except for immediate and references
     * where it usually is UINT8_MAX.  This is allocated lazily, so if a variable
     * has a stack slot it has been initialized and has a value.  Unused variables
     * have neither a stack slot nor a host register assignment. */
    uint8_t             idxStackSlot;
    /** The host register allocated for the variable, UINT8_MAX if not. */
    uint8_t             idxReg;
    /** The argument number if argument, UINT8_MAX if regular variable. */
    uint8_t             uArgNo;
    /** If referenced, the index (unpacked) of the variable referencing this one,
     * otherwise UINT8_MAX.  A referenced variable must only be placed on the stack
     * and must be either kIemNativeVarKind_Stack or kIemNativeVarKind_Immediate. */
    uint8_t             idxReferrerVar;
    /** Guest register being shadowed here, kIemNativeGstReg_End(/UINT8_MAX) if not.
     * @todo not sure what this really is for... */
    IEMNATIVEGSTREG     enmGstReg;
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    /** Flag whether this variable is held in a SIMD register (only supported for 128-bit and 256-bit variables),
     * only valid when idxReg is not UINT8_MAX. */
    bool                fSimdReg : 1;
    /** Set if the register is currently used exclusively, false if the
     * variable is idle and the register can be grabbed. */
    bool                fRegAcquired : 1;
#else
    /** Set if the register is currently used exclusively, false if the
     * variable is idle and the register can be grabbed. */
    bool                fRegAcquired;
#endif

    union
    {
        /** kIemNativeVarKind_Immediate: The immediate value. */
        uint64_t        uValue;
        /** kIemNativeVarKind_VarRef: The index (unpacked) of the variable being referenced. */
        uint8_t         idxRefVar;
        /** kIemNativeVarKind_GstRegRef: The guest register being referenced. */
        struct
        {
            /** The class of register. */
            IEMNATIVEGSTREGREF  enmClass;
            /** Index within the class. */
            uint8_t             idx;
        } GstRegRef;
    } u;
} IEMNATIVEVAR;
/** Pointer to a variable or argument. */
typedef IEMNATIVEVAR *PIEMNATIVEVAR;
/** Pointer to a const variable or argument. */
typedef IEMNATIVEVAR const *PCIEMNATIVEVAR;

/** What is being kept in a host register. */
typedef enum IEMNATIVEWHAT : uint8_t
{
    /** The traditional invalid zero value. */
    kIemNativeWhat_Invalid = 0,
    /** Mapping a variable (IEMNATIVEHSTREG::idxVar). */
    kIemNativeWhat_Var,
    /** Temporary register, this is typically freed when a MC completes. */
    kIemNativeWhat_Tmp,
    /** Call argument w/o a variable mapping.  This is free (via
     * IEMNATIVE_CALL_VOLATILE_GREG_MASK) after the call is emitted. */
    kIemNativeWhat_Arg,
    /** Return status code.
     * @todo not sure if we need this... */
    kIemNativeWhat_rc,
    /** The fixed pVCpu (PVMCPUCC) register.
     * @todo consider offsetting this on amd64 to use negative offsets to access
     *       more members using 8-bit displacements. */
    kIemNativeWhat_pVCpuFixed,
    /** The fixed pCtx (PCPUMCTX) register.
     * @todo consider offsetting this on amd64 to use negative offsets to access
     *       more members using 8-bit displacements. */
    kIemNativeWhat_pCtxFixed,
    /** Fixed temporary register. */
    kIemNativeWhat_FixedTmp,
#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    /** Shadow RIP for the delayed RIP updating debugging. */
    kIemNativeWhat_PcShadow,
#endif
    /** Register reserved by the CPU or OS architecture. */
    kIemNativeWhat_FixedReserved,
    /** End of valid values. */
    kIemNativeWhat_End
} IEMNATIVEWHAT;

/**
 * Host general register entry.
 *
 * The actual allocation status is kept in IEMRECOMPILERSTATE::bmHstRegs.
 *
 * @todo Track immediate values in host registers similarly to how we track the
 *       guest register shadow copies.  For it to be really helpful, though,
 *       we probably need to know which will be reused and put them into
 *       non-volatile registers, otherwise it's going to be more or less
 *       restricted to an instruction or two.
 */
typedef struct IEMNATIVEHSTREG
{
    /** Set of guest registers this one shadows.
     *
     * Using a bitmap here so we can designate the same host register as a copy
     * for more than one guest register.  This is expected to be useful in
     * situations where one value is copied to several registers in a sequence.
     * If the mapping is 1:1, then we'd have to pick which side of a 'MOV SRC,DST'
     * sequence we'd want to let this register follow to be a copy of and there
     * will always be places where we'd be picking the wrong one.
     */
    uint64_t        fGstRegShadows;
    /** What is being kept in this register. */
    IEMNATIVEWHAT   enmWhat;
    /** Variable index (packed) if holding a variable, otherwise UINT8_MAX. */
    uint8_t         idxVar;
    /** Stack slot assigned by iemNativeVarSaveVolatileRegsPreHlpCall and freed
     * by iemNativeVarRestoreVolatileRegsPostHlpCall.  This is not valid outside
     * that scope. */
    uint8_t         idxStackSlot;
    /** Alignment padding. */
    uint8_t         abAlign[5];
} IEMNATIVEHSTREG;
1136 |
|
---|
1137 |
|
---|
1138 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1139 | /**
|
---|
1140 | * Host SIMD register entry - this tracks a virtual 256-bit register split into two 128-bit
|
---|
1141 | * halves; on architectures where no 256-bit host register is available, this entry will track
|
---|
1142 | * two adjacent 128-bit host registers.
|
---|
1143 | *
|
---|
1144 | * The actual allocation status is kept in IEMRECOMPILERSTATE::bmHstSimdRegs.
|
---|
1145 | */
|
---|
1146 | typedef struct IEMNATIVEHSTSIMDREG
|
---|
1147 | {
|
---|
1148 | /** Set of guest registers this one shadows.
|
---|
1149 | *
|
---|
1150 | * Using a bitmap here so we can designate the same host register as a copy
|
---|
1151 | * for more than one guest register. This is expected to be useful in
|
---|
1152 | * situations where one value is copied to several registers in a sequence.
|
---|
1153 | * If the mapping is 1:1, then we'd have to pick which side of a 'MOV SRC,DST'
|
---|
1154 | * sequence we'd want to let this register follow to be a copy of and there
|
---|
1155 | * will always be places where we'd be picking the wrong one.
|
---|
1156 | */
|
---|
1157 | uint64_t fGstRegShadows;
|
---|
1158 | /** What is being kept in this register. */
|
---|
1159 | IEMNATIVEWHAT enmWhat;
|
---|
1160 | /** Variable index (packed) if holding a variable, otherwise UINT8_MAX. */
|
---|
1161 | uint8_t idxVar;
|
---|
1162 | /** Flags what is currently loaded: the low 128 bits, the high 128 bits, or the complete 256 bits. */
|
---|
1163 | IEMNATIVEGSTSIMDREGLDSTSZ enmLoaded;
|
---|
1164 | /** Alignment padding. */
|
---|
1165 | uint8_t abAlign[5];
|
---|
1166 | } IEMNATIVEHSTSIMDREG;
|
---|
1167 | #endif
|
---|
1168 |
|
---|
1169 |
|
---|
1170 | /**
|
---|
1171 | * Core state for the native recompiler, that is, things that need careful
|
---|
1172 | * handling when dealing with branches.
|
---|
1173 | */
|
---|
1174 | typedef struct IEMNATIVECORESTATE
|
---|
1175 | {
|
---|
1176 | #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
|
---|
1177 | /** The current instruction offset in bytes from when the guest program counter
|
---|
1178 | * was updated last. Used for delaying the write to the guest context program counter
|
---|
1179 | * as long as possible. */
|
---|
1180 | uint32_t offPc;
|
---|
1181 | /** Number of instructions where we could skip the updating. */
|
---|
1182 | uint32_t cInstrPcUpdateSkipped;
|
---|
1183 | #endif
|
---|
1184 | /** Allocation bitmap for aHstRegs. */
|
---|
1185 | uint32_t bmHstRegs;
|
---|
1186 |
|
---|
1187 | /** Bitmap marking which host registers contain guest register shadow copies.
|
---|
1188 | * This is used during register allocation to try to preserve copies. */
|
---|
1189 | uint32_t bmHstRegsWithGstShadow;
|
---|
1190 | /** Bitmap marking valid entries in aidxGstRegShadows. */
|
---|
1191 | uint64_t bmGstRegShadows;
|
---|
1192 | #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
|
---|
1193 | /** Bitmap marking which shadowed guest registers are dirty and need writeback when flushing. */
|
---|
1194 | uint64_t bmGstRegShadowDirty;
|
---|
1195 | #endif
|
---|
1196 |
|
---|
1197 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1198 | /** Allocation bitmap for aHstSimdRegs. */
|
---|
1199 | uint32_t bmHstSimdRegs;
|
---|
1200 |
|
---|
1201 | /** Bitmap marking which host SIMD registers contain guest SIMD register shadow copies.
|
---|
1202 | * This is used during register allocation to try to preserve copies. */
|
---|
1203 | uint32_t bmHstSimdRegsWithGstShadow;
|
---|
1204 | /** Bitmap marking valid entries in aidxGstSimdRegShadows. */
|
---|
1205 | uint64_t bmGstSimdRegShadows;
|
---|
1206 | /** Bitmap marking whether the low 128 bits of the shadowed guest register are dirty and need writeback. */
|
---|
1207 | uint64_t bmGstSimdRegShadowDirtyLo128;
|
---|
1208 | /** Bitmap marking whether the high 128 bits of the shadowed guest register are dirty and need writeback. */
|
---|
1209 | uint64_t bmGstSimdRegShadowDirtyHi128;
|
---|
1210 | #endif
|
---|
1211 |
|
---|
1212 | union
|
---|
1213 | {
|
---|
1214 | /** Indexes (unpacked) of the argument variables, UINT8_MAX if not valid. */
|
---|
1215 | uint8_t aidxArgVars[8];
|
---|
1216 | /** For more efficient resetting. */
|
---|
1217 | uint64_t u64ArgVars;
|
---|
1218 | };
|
---|
1219 |
|
---|
1220 | /** Allocation bitmap for the stack. */
|
---|
1221 | uint32_t bmStack;
|
---|
1222 | /** Allocation bitmap for aVars. */
|
---|
1223 | uint32_t bmVars;
|
---|
1224 |
|
---|
1225 | /** Maps a guest register to a host GPR (index by IEMNATIVEGSTREG).
|
---|
1226 | * Entries are only valid if the corresponding bit in bmGstRegShadows is set.
|
---|
1227 | * (A shadow copy of a guest register can only be held in one host register,
|
---|
1228 | * there are no duplicate copies or ambiguities like that). */
|
---|
1229 | uint8_t aidxGstRegShadows[kIemNativeGstReg_End];
|
---|
1230 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1231 | /** Maps a guest SIMD register to a host SIMD register (index by IEMNATIVEGSTSIMDREG).
|
---|
1232 | * Entries are only valid if the corresponding bit in bmGstSimdRegShadows is set.
|
---|
1233 | * (A shadow copy of a guest register can only be held in one host register,
|
---|
1234 | * there are no duplicate copies or ambiguities like that). */
|
---|
1235 | uint8_t aidxGstSimdRegShadows[kIemNativeGstSimdReg_End];
|
---|
1236 | #endif
|
---|
1237 |
|
---|
1238 | /** Host register allocation tracking. */
|
---|
1239 | IEMNATIVEHSTREG aHstRegs[IEMNATIVE_HST_GREG_COUNT];
|
---|
1240 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1241 | /** Host SIMD register allocation tracking. */
|
---|
1242 | IEMNATIVEHSTSIMDREG aHstSimdRegs[IEMNATIVE_HST_SIMD_REG_COUNT];
|
---|
1243 | #endif
|
---|
1244 |
|
---|
1245 | /** Variables and arguments. */
|
---|
1246 | IEMNATIVEVAR aVars[9];
|
---|
1247 | } IEMNATIVECORESTATE;
|
---|
1248 | /** Pointer to core state. */
|
---|
1249 | typedef IEMNATIVECORESTATE *PIEMNATIVECORESTATE;
|
---|
1250 | /** Pointer to const core state. */
|
---|
1251 | typedef IEMNATIVECORESTATE const *PCIEMNATIVECORESTATE;
|
---|
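/*
 * Rough usage sketch (illustrative only): when a guest register is marked as
 * shadowed in bmGstRegShadows, the tracking fields above are expected to agree
 * with each other roughly along these lines:
 *
 *      if (pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg))
 *      {
 *          uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[enmGstReg];
 *          Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg));
 *          Assert(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & RT_BIT_64(enmGstReg));
 *      }
 */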
1252 |
|
---|
1253 | /** @def IEMNATIVE_VAR_IDX_UNPACK
|
---|
1254 | * @returns Index into IEMNATIVECORESTATE::aVars.
|
---|
1255 | * @param a_idxVar Variable index w/ magic (in strict builds).
|
---|
1256 | */
|
---|
1257 | /** @def IEMNATIVE_VAR_IDX_PACK
|
---|
1258 | * @returns Variable index w/ magic (in strict builds).
|
---|
1259 | * @param a_idxVar Index into IEMNATIVECORESTATE::aVars.
|
---|
1260 | */
|
---|
1261 | #ifdef VBOX_STRICT
|
---|
1262 | # define IEMNATIVE_VAR_IDX_UNPACK(a_idxVar) ((a_idxVar) & IEMNATIVE_VAR_IDX_MASK)
|
---|
1263 | # define IEMNATIVE_VAR_IDX_PACK(a_idxVar) ((a_idxVar) | IEMNATIVE_VAR_IDX_MAGIC)
|
---|
1264 | # define IEMNATIVE_VAR_IDX_MAGIC UINT8_C(0xd0)
|
---|
1265 | # define IEMNATIVE_VAR_IDX_MAGIC_MASK UINT8_C(0xf0)
|
---|
1266 | # define IEMNATIVE_VAR_IDX_MASK UINT8_C(0x0f)
|
---|
1267 | #else
|
---|
1268 | # define IEMNATIVE_VAR_IDX_UNPACK(a_idxVar) (a_idxVar)
|
---|
1269 | # define IEMNATIVE_VAR_IDX_PACK(a_idxVar) (a_idxVar)
|
---|
1270 | #endif
|
---|
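/*
 * Illustrative sketch: in strict builds packing variable slot 3 yields 0xd3
 * (magic nibble plus slot index), while non-strict builds pass the index
 * through unchanged.
 *
 *      uint8_t const idxVar  = IEMNATIVE_VAR_IDX_PACK(3);          // 0xd3 in strict builds, 3 otherwise
 *      uint8_t const idxSlot = IEMNATIVE_VAR_IDX_UNPACK(idxVar);   // back to 3, indexes Core.aVars
 */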
1271 |
|
---|
1272 |
|
---|
1273 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1274 | /** Clear the dirty state of the given guest SIMD register. */
|
---|
1275 | # define IEMNATIVE_SIMD_REG_STATE_CLR_DIRTY(a_pReNative, a_iSimdReg) \
|
---|
1276 | do { \
|
---|
1277 | (a_pReNative)->Core.bmGstSimdRegShadowDirtyLo128 &= ~RT_BIT_64(a_iSimdReg); \
|
---|
1278 | (a_pReNative)->Core.bmGstSimdRegShadowDirtyHi128 &= ~RT_BIT_64(a_iSimdReg); \
|
---|
1279 | } while (0)
|
---|
1280 |
|
---|
1281 | /** Returns whether the low 128-bits of the given guest SIMD register are dirty. */
|
---|
1282 | # define IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_LO_U128(a_pReNative, a_iSimdReg) \
|
---|
1283 | RT_BOOL((a_pReNative)->Core.bmGstSimdRegShadowDirtyLo128 & RT_BIT_64(a_iSimdReg))
|
---|
1284 | /** Returns whether the high 128-bits of the given guest SIMD register are dirty. */
|
---|
1285 | # define IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_HI_U128(a_pReNative, a_iSimdReg) \
|
---|
1286 | RT_BOOL((a_pReNative)->Core.bmGstSimdRegShadowDirtyHi128 & RT_BIT_64(a_iSimdReg))
|
---|
1287 | /** Returns whether the given guest SIMD register is dirty. */
|
---|
1288 | # define IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_U256(a_pReNative, a_iSimdReg) \
|
---|
1289 | RT_BOOL(((a_pReNative)->Core.bmGstSimdRegShadowDirtyLo128 | (a_pReNative)->Core.bmGstSimdRegShadowDirtyHi128) & RT_BIT_64(a_iSimdReg))
|
---|
1290 |
|
---|
1291 | /** Set the low 128-bits of the given guest SIMD register to the dirty state. */
|
---|
1292 | # define IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(a_pReNative, a_iSimdReg) \
|
---|
1293 | ((a_pReNative)->Core.bmGstSimdRegShadowDirtyLo128 |= RT_BIT_64(a_iSimdReg))
|
---|
1294 | /** Set the high 128-bits of the given guest SIMD register to the dirty state. */
|
---|
1295 | # define IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(a_pReNative, a_iSimdReg) \
|
---|
1296 | ((a_pReNative)->Core.bmGstSimdRegShadowDirtyHi128 |= RT_BIT_64(a_iSimdReg))
|
---|
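/*
 * Minimal usage sketch (illustrative only): mark a half as modified after
 * emitting code that changes it, then test and clear the dirty state when
 * flushing the shadow copy back to the guest context.
 *
 *      IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, idxGstSimdReg);
 *      ...
 *      if (IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_U256(pReNative, idxGstSimdReg))
 *      {
 *          // emit the writeback of the dirty halves here, then:
 *          IEMNATIVE_SIMD_REG_STATE_CLR_DIRTY(pReNative, idxGstSimdReg);
 *      }
 */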
1297 |
|
---|
1298 | /** Flag for indicating that IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() has emitted code in the current TB. */
|
---|
1299 | # define IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_DEVICE_NOT_AVAILABLE RT_BIT_32(0)
|
---|
1300 | /** Flag for indicating that IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() has emitted code in the current TB. */
|
---|
1301 | # define IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_WAIT_DEVICE_NOT_AVAILABLE RT_BIT_32(1)
|
---|
1302 | /** Flag for indicating that IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() has emitted code in the current TB. */
|
---|
1303 | # define IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_SSE RT_BIT_32(2)
|
---|
1304 | /** Flag for indicating that IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() has emitted code in the current TB. */
|
---|
1305 | # define IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_AVX RT_BIT_32(3)
|
---|
1306 | #endif
|
---|
1307 |
|
---|
1308 |
|
---|
1309 | /**
|
---|
1310 | * Conditional stack entry.
|
---|
1311 | */
|
---|
1312 | typedef struct IEMNATIVECOND
|
---|
1313 | {
|
---|
1314 | /** Set if we're in the "else" part, clear if we're in the "if" before it. */
|
---|
1315 | bool fInElse;
|
---|
1316 | /** The label for the IEM_MC_ELSE. */
|
---|
1317 | uint32_t idxLabelElse;
|
---|
1318 | /** The label for the IEM_MC_ENDIF. */
|
---|
1319 | uint32_t idxLabelEndIf;
|
---|
1320 | /** The initial state snapshot as the if-block starts executing. */
|
---|
1321 | IEMNATIVECORESTATE InitialState;
|
---|
1322 | /** The state snapshot at the end of the if-block. */
|
---|
1323 | IEMNATIVECORESTATE IfFinalState;
|
---|
1324 | } IEMNATIVECOND;
|
---|
1325 | /** Pointer to a condition stack entry. */
|
---|
1326 | typedef IEMNATIVECOND *PIEMNATIVECOND;
|
---|
1327 |
|
---|
1328 |
|
---|
1329 | /**
|
---|
1330 | * Native recompiler state.
|
---|
1331 | */
|
---|
1332 | typedef struct IEMRECOMPILERSTATE
|
---|
1333 | {
|
---|
1334 | /** Size of the buffer that pbNativeRecompileBufR3 points to in
|
---|
1335 | * IEMNATIVEINSTR units. */
|
---|
1336 | uint32_t cInstrBufAlloc;
|
---|
1337 | #ifdef VBOX_STRICT
|
---|
1338 | /** Strict: How far the last iemNativeInstrBufEnsure() checked. */
|
---|
1339 | uint32_t offInstrBufChecked;
|
---|
1340 | #else
|
---|
1341 | uint32_t uPadding1; /* We don't keep track of the size here... */
|
---|
1342 | #endif
|
---|
1343 | /** Fixed temporary code buffer for native recompilation. */
|
---|
1344 | PIEMNATIVEINSTR pInstrBuf;
|
---|
1345 |
|
---|
1346 | /** Bitmaps with the label types used. */
|
---|
1347 | uint64_t bmLabelTypes;
|
---|
1348 | /** Actual number of labels in paLabels. */
|
---|
1349 | uint32_t cLabels;
|
---|
1350 | /** Max number of entries allowed in paLabels before reallocating it. */
|
---|
1351 | uint32_t cLabelsAlloc;
|
---|
1352 | /** Labels defined while recompiling (referenced by fixups). */
|
---|
1353 | PIEMNATIVELABEL paLabels;
|
---|
1354 | /** Array with indexes of unique labels (uData always 0). */
|
---|
1355 | uint32_t aidxUniqueLabels[kIemNativeLabelType_FirstWithMultipleInstances];
|
---|
1356 |
|
---|
1357 | /** Actual number of fixups in paFixups. */
|
---|
1358 | uint32_t cFixups;
|
---|
1359 | /** Max number of entries allowed in paFixups before reallocating it. */
|
---|
1360 | uint32_t cFixupsAlloc;
|
---|
1361 | /** Buffer used by the recompiler for recording fixups when generating code. */
|
---|
1362 | PIEMNATIVEFIXUP paFixups;
|
---|
1363 |
|
---|
1364 | #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
|
---|
1365 | /** Number of debug info entries allocated for pDbgInfo. */
|
---|
1366 | uint32_t cDbgInfoAlloc;
|
---|
1367 | uint32_t uPadding;
|
---|
1368 | /** Debug info. */
|
---|
1369 | PIEMTBDBG pDbgInfo;
|
---|
1370 | #endif
|
---|
1371 |
|
---|
1372 | #ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
|
---|
1373 | /** The current call index (liveness array and threaded calls in TB). */
|
---|
1374 | uint32_t idxCurCall;
|
---|
1375 | /** Number of liveness entries allocated. */
|
---|
1376 | uint32_t cLivenessEntriesAlloc;
|
---|
1377 | /** Liveness entries for all the calls in the TB being recompiled.
|
---|
1378 | * The entry for idxCurCall contains the info for what the next call will
|
---|
1379 | * require wrt registers. (Which means the last entry is the initial liveness
|
---|
1380 | * state.) */
|
---|
1381 | PIEMLIVENESSENTRY paLivenessEntries;
|
---|
1382 | #endif
|
---|
1383 |
|
---|
1384 | /** The translation block being recompiled. */
|
---|
1385 | PCIEMTB pTbOrg;
|
---|
1386 | /** The VMCPU structure of the EMT. */
|
---|
1387 | PVMCPUCC pVCpu;
|
---|
1388 |
|
---|
1389 | /** Condition sequence number (for generating unique labels). */
|
---|
1390 | uint16_t uCondSeqNo;
|
---|
1391 | /** Check IRQ sequence number (for generating unique labels). */
|
---|
1392 | uint16_t uCheckIrqSeqNo;
|
---|
1393 | /** TLB load sequence number (for generating unique labels). */
|
---|
1394 | uint16_t uTlbSeqNo;
|
---|
1395 | /** The current condition stack depth (aCondStack). */
|
---|
1396 | uint8_t cCondDepth;
|
---|
1397 |
|
---|
1398 | /** The argument count + hidden regs from the IEM_MC_BEGIN_EX statement. */
|
---|
1399 | uint8_t cArgsX;
|
---|
1400 | /** The IEM_CIMPL_F_XXX flags from the IEM_MC_BEGIN statement. */
|
---|
1401 | uint32_t fCImpl;
|
---|
1402 | /** The IEM_MC_F_XXX flags from the IEM_MC_BEGIN statement. */
|
---|
1403 | uint32_t fMc;
|
---|
1404 | /** The expected IEMCPU::fExec value for the current call/instruction. */
|
---|
1405 | uint32_t fExec;
|
---|
1406 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1407 | /** IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_XXX flags for exception checks
|
---|
1408 | * we only emit once per TB (or when the cr0/cr4/xcr0 register changes).
|
---|
1409 | *
|
---|
1410 | * This is an optimization because these control registers can only be changed
|
---|
1411 | * by calling a C helper we can catch. Should reduce the number of instructions in a TB
|
---|
1412 | * consisting of multiple SIMD instructions.
|
---|
1413 | */
|
---|
1414 | uint32_t fSimdRaiseXcptChecksEmitted;
|
---|
1415 | #endif
|
---|
1416 |
|
---|
1417 | /** Core state requiring care with branches. */
|
---|
1418 | IEMNATIVECORESTATE Core;
|
---|
1419 |
|
---|
1420 | /** The condition nesting stack. */
|
---|
1421 | IEMNATIVECOND aCondStack[2];
|
---|
1422 |
|
---|
1423 | #ifndef IEM_WITH_THROW_CATCH
|
---|
1424 | /** Pointer to the setjmp/longjmp buffer if we're not using C++ exceptions
|
---|
1425 | * for recompilation error handling. */
|
---|
1426 | jmp_buf JmpBuf;
|
---|
1427 | #endif
|
---|
1428 | } IEMRECOMPILERSTATE;
|
---|
1429 | /** Pointer to a native recompiler state. */
|
---|
1430 | typedef IEMRECOMPILERSTATE *PIEMRECOMPILERSTATE;
|
---|
1431 |
|
---|
1432 |
|
---|
1433 | /** @def IEMNATIVE_TRY_SETJMP
|
---|
1434 | * Wrapper around setjmp / try, hiding all the ugly differences.
|
---|
1435 | *
|
---|
1436 | * @note Use with extreme care as this is a fragile macro.
|
---|
1437 | * @param a_pReNative The native recompile state.
|
---|
1438 | * @param a_rcTarget The variable that should receive the status code in case
|
---|
1439 | * of a longjmp/throw.
|
---|
1440 | */
|
---|
1441 | /** @def IEMNATIVE_CATCH_LONGJMP_BEGIN
|
---|
1442 | * Start wrapper for catch / setjmp-else.
|
---|
1443 | *
|
---|
1444 | * This will set up a scope.
|
---|
1445 | *
|
---|
1446 | * @note Use with extreme care as this is a fragile macro.
|
---|
1447 | * @param a_pReNative The native recompile state.
|
---|
1448 | * @param a_rcTarget The variable that should receive the status code in case
|
---|
1449 | * of a longjmp/throw.
|
---|
1450 | */
|
---|
1451 | /** @def IEMNATIVE_CATCH_LONGJMP_END
|
---|
1452 | * End wrapper for catch / setjmp-else.
|
---|
1453 | *
|
---|
1454 | * This will close the scope set up by IEMNATIVE_CATCH_LONGJMP_BEGIN and clean
|
---|
1455 | * up the state.
|
---|
1456 | *
|
---|
1457 | * @note Use with extreme care as this is a fragile macro.
|
---|
1458 | * @param a_pReNative The native recompile state.
|
---|
1459 | */
|
---|
1460 | /** @def IEMNATIVE_DO_LONGJMP
|
---|
1461 | *
|
---|
1462 | * Wrapper around longjmp / throw.
|
---|
1463 | *
|
---|
1464 | * @param a_pReNative The native recompile state.
|
---|
1465 | * @param a_rc The status code to jump back with / throw.
|
---|
1466 | */
|
---|
1467 | #ifdef IEM_WITH_THROW_CATCH
|
---|
1468 | # define IEMNATIVE_TRY_SETJMP(a_pReNative, a_rcTarget) \
|
---|
1469 | a_rcTarget = VINF_SUCCESS; \
|
---|
1470 | try
|
---|
1471 | # define IEMNATIVE_CATCH_LONGJMP_BEGIN(a_pReNative, a_rcTarget) \
|
---|
1472 | catch (int rcThrown) \
|
---|
1473 | { \
|
---|
1474 | a_rcTarget = rcThrown
|
---|
1475 | # define IEMNATIVE_CATCH_LONGJMP_END(a_pReNative) \
|
---|
1476 | } \
|
---|
1477 | ((void)0)
|
---|
1478 | # define IEMNATIVE_DO_LONGJMP(a_pReNative, a_rc) throw int(a_rc)
|
---|
1479 | #else /* !IEM_WITH_THROW_CATCH */
|
---|
1480 | # define IEMNATIVE_TRY_SETJMP(a_pReNative, a_rcTarget) \
|
---|
1481 | if ((a_rcTarget = setjmp((a_pReNative)->JmpBuf)) == 0)
|
---|
1482 | # define IEMNATIVE_CATCH_LONGJMP_BEGIN(a_pReNative, a_rcTarget) \
|
---|
1483 | else \
|
---|
1484 | { \
|
---|
1485 | ((void)0)
|
---|
1486 | # define IEMNATIVE_CATCH_LONGJMP_END(a_pReNative) \
|
---|
1487 | }
|
---|
1488 | # define IEMNATIVE_DO_LONGJMP(a_pReNative, a_rc) longjmp((a_pReNative)->JmpBuf, (a_rc))
|
---|
1489 | #endif /* !IEM_WITH_THROW_CATCH */
|
---|
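/*
 * Minimal usage sketch for the fragile setjmp/try wrappers above (the emitter
 * call and the error handling shown here are illustrative only):
 *
 *      int rcBody = VINF_SUCCESS;
 *      IEMNATIVE_TRY_SETJMP(pReNative, rcBody)
 *      {
 *          off = iemNativeEmitSomething(pReNative, off);  // may IEMNATIVE_DO_LONGJMP() on failure
 *      }
 *      IEMNATIVE_CATCH_LONGJMP_BEGIN(pReNative, rcBody);
 *      {
 *          LogRel(("recompilation aborted: %Rrc\n", rcBody));
 *      }
 *      IEMNATIVE_CATCH_LONGJMP_END(pReNative);
 *
 * Keep in mind that longjmp will not run C++ destructors in the non-exception
 * configuration.
 */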
1490 |
|
---|
1491 |
|
---|
1492 | /**
|
---|
1493 | * Native recompiler worker for a threaded function.
|
---|
1494 | *
|
---|
1495 | * @returns New code buffer offset; throws VBox status code in case of a failure.
|
---|
1496 | * @param pReNative The native recompiler state.
|
---|
1497 | * @param off The current code buffer offset.
|
---|
1498 | * @param pCallEntry The threaded call entry.
|
---|
1499 | *
|
---|
1500 | * @note This may throw/longjmp VBox status codes (int) to abort compilation, so no RT_NOEXCEPT!
|
---|
1501 | */
|
---|
1502 | typedef uint32_t (VBOXCALL FNIEMNATIVERECOMPFUNC)(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry);
|
---|
1503 | /** Pointer to a native recompiler worker for a threaded function. */
|
---|
1504 | typedef FNIEMNATIVERECOMPFUNC *PFNIEMNATIVERECOMPFUNC;
|
---|
1505 |
|
---|
1506 | /** Defines a native recompiler worker for a threaded function.
|
---|
1507 | * @see FNIEMNATIVERECOMPFUNC */
|
---|
1508 | #define IEM_DECL_IEMNATIVERECOMPFUNC_DEF(a_Name) \
|
---|
1509 | uint32_t VBOXCALL a_Name(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
|
---|
1510 |
|
---|
1511 | /** Prototypes a native recompiler function for a threaded function.
|
---|
1512 | * @see FNIEMNATIVERECOMPFUNC */
|
---|
1513 | #define IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(a_Name) FNIEMNATIVERECOMPFUNC a_Name
|
---|
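/*
 * Definition sketch (the worker name is hypothetical, for illustration only):
 *
 *      IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_Example)
 *      {
 *          RT_NOREF(pCallEntry);
 *          // emit native instructions here, advancing 'off' as code is added
 *          return off;
 *      }
 */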
1514 |
|
---|
1515 |
|
---|
1516 | /**
|
---|
1517 | * Native recompiler liveness analysis worker for a threaded function.
|
---|
1518 | *
|
---|
1519 | * @param pCallEntry The threaded call entry.
|
---|
1520 | * @param pIncoming The incoming liveness state entry.
|
---|
1521 | * @param pOutgoing The outgoing liveness state entry.
|
---|
1522 | */
|
---|
1523 | typedef DECLCALLBACKTYPE(void, FNIEMNATIVELIVENESSFUNC, (PCIEMTHRDEDCALLENTRY pCallEntry,
|
---|
1524 | PCIEMLIVENESSENTRY pIncoming, PIEMLIVENESSENTRY pOutgoing));
|
---|
1525 | /** Pointer to a native recompiler liveness analysis worker for a threaded function. */
|
---|
1526 | typedef FNIEMNATIVELIVENESSFUNC *PFNIEMNATIVELIVENESSFUNC;
|
---|
1527 |
|
---|
1528 | /** Defines a native recompiler liveness analysis worker for a threaded function.
|
---|
1529 | * @see FNIEMNATIVELIVENESSFUNC */
|
---|
1530 | #define IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(a_Name) \
|
---|
1531 | DECLCALLBACK(void) a_Name(PCIEMTHRDEDCALLENTRY pCallEntry, PCIEMLIVENESSENTRY pIncoming, PIEMLIVENESSENTRY pOutgoing)
|
---|
1532 |
|
---|
1533 | /** Prototypes a native recompiler liveness analysis function for a threaded function.
|
---|
1534 | * @see FNIEMNATIVELIVENESSFUNC */
|
---|
1535 | #define IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(a_Name) FNIEMNATIVELIVENESSFUNC a_Name
|
---|
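/*
 * Definition sketch (hypothetical worker name; a real worker derives the
 * outgoing liveness state from the incoming one according to what the call
 * reads and clobbers):
 *
 *      IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(iemNativeLivenessFunc_Example)
 *      {
 *          RT_NOREF(pCallEntry);
 *          *pOutgoing = *pIncoming;    // pass-through: nothing read or clobbered
 *      }
 */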
1536 |
|
---|
1537 |
|
---|
1538 | /** Define a native recompiler helper function, safe to call from the TB code. */
|
---|
1539 | #define IEM_DECL_NATIVE_HLP_DEF(a_RetType, a_Name, a_ArgList) \
|
---|
1540 | DECL_HIDDEN_THROW(a_RetType) VBOXCALL a_Name a_ArgList
|
---|
1541 | /** Prototype a native recompiler helper function, safe to call from the TB code. */
|
---|
1542 | #define IEM_DECL_NATIVE_HLP_PROTO(a_RetType, a_Name, a_ArgList) \
|
---|
1543 | DECL_HIDDEN_THROW(a_RetType) VBOXCALL a_Name a_ArgList
|
---|
1544 | /** Pointer typedef a native recompiler helper function, safe to call from the TB code. */
|
---|
1545 | #define IEM_DECL_NATIVE_HLP_PTR(a_RetType, a_Name, a_ArgList) \
|
---|
1546 | a_RetType (VBOXCALL *a_Name) a_ArgList
|
---|
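/*
 * Usage sketch (the helper name and pointer typedef below are hypothetical):
 *
 *      IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpExampleFetch,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
 *      typedef IEM_DECL_NATIVE_HLP_PTR(uint64_t, PFNIEMNATIVEHLPEXAMPLE,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
 */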
1547 |
|
---|
1548 |
|
---|
1549 | #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
|
---|
1550 | DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddNativeOffset(PIEMRECOMPILERSTATE pReNative, uint32_t off);
|
---|
1551 | DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddGuestRegShadowing(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg,
|
---|
1552 | uint8_t idxHstReg = UINT8_MAX, uint8_t idxHstRegPrev = UINT8_MAX);
|
---|
1553 | # ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1554 | DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddGuestSimdRegShadowing(PIEMRECOMPILERSTATE pReNative,
|
---|
1555 | IEMNATIVEGSTSIMDREG enmGstSimdReg,
|
---|
1556 | uint8_t idxHstSimdReg = UINT8_MAX,
|
---|
1557 | uint8_t idxHstSimdRegPrev = UINT8_MAX);
|
---|
1558 | # endif
|
---|
1559 | # if defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK) || defined(IEMNATIVE_WITH_SIMD_REG_ALLOCATOR)
|
---|
1560 | DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddGuestRegDirty(PIEMRECOMPILERSTATE pReNative, bool fSimdReg,
|
---|
1561 | uint8_t idxGstReg, uint8_t idxHstReg);
|
---|
1562 | DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddGuestRegWriteback(PIEMRECOMPILERSTATE pReNative, bool fSimdReg,
|
---|
1563 | uint64_t fGstReg);
|
---|
1564 | # endif
|
---|
1565 | DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddDelayedPcUpdate(PIEMRECOMPILERSTATE pReNative,
|
---|
1566 | uint32_t offPc, uint32_t cInstrSkipped);
|
---|
1567 | #endif /* IEMNATIVE_WITH_TB_DEBUG_INFO */
|
---|
1568 |
|
---|
1569 | DECL_HIDDEN_THROW(uint32_t) iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
|
---|
1570 | uint32_t offWhere = UINT32_MAX, uint16_t uData = 0);
|
---|
1571 | DECL_HIDDEN_THROW(void) iemNativeLabelDefine(PIEMRECOMPILERSTATE pReNative, uint32_t idxLabel, uint32_t offWhere);
|
---|
1572 | DECL_HIDDEN_THROW(void) iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel,
|
---|
1573 | IEMNATIVEFIXUPTYPE enmType, int8_t offAddend = 0);
|
---|
1574 | DECL_HIDDEN_THROW(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq);
|
---|
1575 |
|
---|
1576 | DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile = true);
|
---|
1577 | DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpEx(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint32_t fRegMask,
|
---|
1578 | bool fPreferVolatile = true);
|
---|
1579 | DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
|
---|
1580 | bool fPreferVolatile = true);
|
---|
1581 | DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
|
---|
1582 | IEMNATIVEGSTREG enmGstReg,
|
---|
1583 | IEMNATIVEGSTREGUSE enmIntendedUse = kIemNativeGstRegUse_ReadOnly,
|
---|
1584 | bool fNoVolatileRegs = false, bool fSkipLivenessAssert = false);
|
---|
1585 | DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
|
---|
1586 | IEMNATIVEGSTREG enmGstReg);
|
---|
1587 |
|
---|
1588 | DECL_HIDDEN_THROW(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs);
|
---|
1589 | DECL_HIDDEN_THROW(uint8_t) iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg);
|
---|
1590 | #if (defined(IPRT_INCLUDED_x86_h) && defined(RT_ARCH_AMD64)) || (defined(IPRT_INCLUDED_armv8_h) && defined(RT_ARCH_ARM64))
|
---|
1591 | DECL_HIDDEN_THROW(uint32_t) iemNativeRegMoveOrSpillStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar,
|
---|
1592 | uint32_t fForbiddenRegs = IEMNATIVE_CALL_VOLATILE_GREG_MASK);
|
---|
1593 | # ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1594 | DECL_HIDDEN_THROW(uint32_t) iemNativeSimdRegMoveOrSpillStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar,
|
---|
1595 | uint32_t fForbiddenRegs = IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK);
|
---|
1596 | # endif
|
---|
1597 | #endif
|
---|
1598 | DECLHIDDEN(void) iemNativeRegFree(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
|
---|
1599 | DECLHIDDEN(void) iemNativeRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
|
---|
1600 | DECLHIDDEN(void) iemNativeRegFreeTmpImm(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
|
---|
1601 | DECLHIDDEN(void) iemNativeRegFreeVar(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, bool fFlushShadows) RT_NOEXCEPT;
|
---|
1602 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1603 | DECLHIDDEN(void) iemNativeSimdRegFreeVar(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstSimdReg, bool fFlushShadows) RT_NOEXCEPT;
|
---|
1604 | # ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
|
---|
1605 | DECL_HIDDEN_THROW(uint32_t) iemNativeSimdRegFlushDirtyGuestByHostSimdRegShadow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstReg);
|
---|
1606 | # endif
|
---|
1607 | #endif
|
---|
1608 | DECLHIDDEN(void) iemNativeRegFreeAndFlushMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegMask) RT_NOEXCEPT;
|
---|
1609 | DECL_HIDDEN_THROW(uint32_t) iemNativeRegMoveAndFreeAndFlushAtCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs,
|
---|
1610 | uint32_t fKeepVars = 0);
|
---|
1611 | DECLHIDDEN(void) iemNativeRegFlushGuestShadows(PIEMRECOMPILERSTATE pReNative, uint64_t fGstRegs) RT_NOEXCEPT;
|
---|
1612 | DECLHIDDEN(void) iemNativeRegFlushGuestShadowsByHostMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegs) RT_NOEXCEPT;
|
---|
1613 | DECL_HIDDEN_THROW(uint32_t) iemNativeRegRestoreGuestShadowsInVolatileRegs(PIEMRECOMPILERSTATE pReNative, uint32_t off,
|
---|
1614 | uint32_t fHstRegsActiveShadows);
|
---|
1615 | #ifdef VBOX_STRICT
|
---|
1616 | DECLHIDDEN(void) iemNativeRegAssertSanity(PIEMRECOMPILERSTATE pReNative);
|
---|
1617 | #endif
|
---|
1618 | DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushPendingWritesSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t fGstShwExcept,
|
---|
1619 | uint64_t fGstSimdShwExcept);
|
---|
1620 | #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
|
---|
1621 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcWritebackSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off);
|
---|
1622 | #endif
|
---|
1623 | #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
|
---|
1624 | DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushPendingWrite(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEGSTREG enmGstReg);
|
---|
1625 | DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuest(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t fFlushGstReg = UINT64_MAX);
|
---|
1626 | DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuestByHostRegShadow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstReg);
|
---|
1627 | #endif
|
---|
1628 |
|
---|
1629 |
|
---|
1630 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1631 | DECL_HIDDEN_THROW(uint8_t) iemNativeSimdRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile = true);
|
---|
1632 | DECL_HIDDEN_THROW(uint8_t) iemNativeSimdRegAllocTmpEx(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint32_t fRegMask,
|
---|
1633 | bool fPreferVolatile = true);
|
---|
1634 | DECL_HIDDEN_THROW(uint8_t) iemNativeSimdRegAllocTmpForGuestSimdReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
|
---|
1635 | IEMNATIVEGSTSIMDREG enmGstSimdReg,
|
---|
1636 | IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz,
|
---|
1637 | IEMNATIVEGSTREGUSE enmIntendedUse = kIemNativeGstRegUse_ReadOnly,
|
---|
1638 | bool fNoVolatileRegs = false);
|
---|
1639 | DECLHIDDEN(void) iemNativeSimdRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstSimdReg) RT_NOEXCEPT;
|
---|
1640 | DECLHIDDEN(void) iemNativeSimdRegFlushGuestShadows(PIEMRECOMPILERSTATE pReNative, uint64_t fGstSimdRegs) RT_NOEXCEPT;
|
---|
1641 | DECL_HIDDEN_THROW(uint32_t) iemNativeSimdRegFlushPendingWrite(PIEMRECOMPILERSTATE pReNative, uint32_t off,
|
---|
1642 | IEMNATIVEGSTSIMDREG enmGstSimdReg);
|
---|
1643 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitLoadSimdRegWithGstShadowSimdReg(PIEMRECOMPILERSTATE pReNative, uint32_t off,
|
---|
1644 | uint8_t idxHstSimdReg, IEMNATIVEGSTSIMDREG enmGstSimdReg,
|
---|
1645 | IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz);
|
---|
1646 | #endif
|
---|
1647 |
|
---|
1648 | DECL_HIDDEN_THROW(uint8_t) iemNativeArgAlloc(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t cbType);
|
---|
1649 | DECL_HIDDEN_THROW(uint8_t) iemNativeArgAllocConst(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t cbType, uint64_t uValue);
|
---|
1650 | DECL_HIDDEN_THROW(uint8_t) iemNativeArgAllocLocalRef(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t idxOtherVar);
|
---|
1651 | DECL_HIDDEN_THROW(uint8_t) iemNativeVarAlloc(PIEMRECOMPILERSTATE pReNative, uint8_t cbType);
|
---|
1652 | DECL_HIDDEN_THROW(uint8_t) iemNativeVarAllocConst(PIEMRECOMPILERSTATE pReNative, uint8_t cbType, uint64_t uValue);
|
---|
1653 | DECL_HIDDEN_THROW(uint8_t) iemNativeVarAllocAssign(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t cbType, uint8_t idxVarOther);
|
---|
1654 | DECL_HIDDEN_THROW(void) iemNativeVarSetKindToStack(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar);
|
---|
1655 | DECL_HIDDEN_THROW(void) iemNativeVarSetKindToConst(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint64_t uValue);
|
---|
1656 | DECL_HIDDEN_THROW(void) iemNativeVarSetKindToGstRegRef(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar,
|
---|
1657 | IEMNATIVEGSTREGREF enmRegClass, uint8_t idxReg);
|
---|
1658 | DECL_HIDDEN_THROW(uint8_t) iemNativeVarGetStackSlot(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar);
|
---|
1659 | DECL_HIDDEN_THROW(uint8_t) iemNativeVarRegisterAcquire(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint32_t *poff,
|
---|
1660 | bool fInitialized = false, uint8_t idxRegPref = UINT8_MAX);
|
---|
1661 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1662 | DECL_HIDDEN_THROW(uint8_t) iemNativeVarSimdRegisterAcquire(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint32_t *poff,
|
---|
1663 | bool fInitialized = false, uint8_t idxRegPref = UINT8_MAX);
|
---|
1664 | #endif
|
---|
1665 | DECL_HIDDEN_THROW(uint8_t) iemNativeVarRegisterAcquireForGuestReg(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar,
|
---|
1666 | IEMNATIVEGSTREG enmGstReg, uint32_t *poff);
|
---|
1667 | DECL_HIDDEN_THROW(uint32_t) iemNativeVarSaveVolatileRegsPreHlpCall(PIEMRECOMPILERSTATE pReNative, uint32_t off,
|
---|
1668 | uint32_t fHstRegsNotToSave);
|
---|
1669 | DECL_HIDDEN_THROW(uint32_t) iemNativeVarRestoreVolatileRegsPostHlpCall(PIEMRECOMPILERSTATE pReNative, uint32_t off,
|
---|
1670 | uint32_t fHstRegsNotToSave);
|
---|
1671 | DECLHIDDEN(void) iemNativeVarFreeOneWorker(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar);
|
---|
1672 | DECLHIDDEN(void) iemNativeVarFreeAllSlow(PIEMRECOMPILERSTATE pReNative, uint32_t bmVars);
|
---|
1673 |
|
---|
1674 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitLoadGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off,
|
---|
1675 | uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg);
|
---|
1676 | #ifdef VBOX_STRICT
|
---|
1677 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitTop32BitsClearCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxReg);
|
---|
1678 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitGuestRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxReg,
|
---|
1679 | IEMNATIVEGSTREG enmGstReg);
|
---|
1680 | # ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1681 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitGuestSimdRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxSimdReg,
|
---|
1682 | IEMNATIVEGSTSIMDREG enmGstSimdReg,
|
---|
1683 | IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz);
|
---|
1684 | # endif
|
---|
1685 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitExecFlagsCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fExec);
|
---|
1686 | #endif
|
---|
1687 | #ifdef IEMNATIVE_STRICT_EFLAGS_SKIPPING
|
---|
1688 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitEFlagsSkippingCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fEflNeeded);
|
---|
1689 | #endif
|
---|
1690 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr);
|
---|
1691 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCallCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs, uint8_t cHiddenArgs, bool fFlushPendingWrites = true);
|
---|
1692 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCImplCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
|
---|
1693 | uint64_t fGstShwFlush, uintptr_t pfnCImpl, uint8_t cbInstr, uint8_t cAddParams,
|
---|
1694 | uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
|
---|
1695 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitThreadedCall(PIEMRECOMPILERSTATE pReNative, uint32_t off,
|
---|
1696 | PCIEMTHRDEDCALLENTRY pCallEntry);
|
---|
1697 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off,
|
---|
1698 | uint8_t idxAddrReg, uint8_t idxInstr);
|
---|
1699 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off,
|
---|
1700 | uint8_t idxAddrReg, uint8_t idxInstr);
|
---|
1701 | DECL_HIDDEN_THROW(uint32_t) iemNativeEmitLeaGprByGstRegRef(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxGprDst,
|
---|
1702 | IEMNATIVEGSTREGREF enmClass, uint8_t idxRegInClass);
|
---|
1703 |
|
---|
1704 |
|
---|
1705 | IEM_DECL_NATIVE_HLP_PROTO(int, iemNativeHlpExecStatusCodeFiddling,(PVMCPUCC pVCpu, int rc, uint8_t idxInstr));
|
---|
1706 | IEM_DECL_NATIVE_HLP_PROTO(int, iemNativeHlpExecRaiseGp0,(PVMCPUCC pVCpu));
|
---|
1707 | IEM_DECL_NATIVE_HLP_PROTO(int, iemNativeHlpExecRaiseNm,(PVMCPUCC pVCpu));
|
---|
1708 | IEM_DECL_NATIVE_HLP_PROTO(int, iemNativeHlpExecRaiseUd,(PVMCPUCC pVCpu));
|
---|
1709 | IEM_DECL_NATIVE_HLP_PROTO(int, iemNativeHlpExecRaiseMf,(PVMCPUCC pVCpu));
|
---|
1710 | IEM_DECL_NATIVE_HLP_PROTO(int, iemNativeHlpExecRaiseXf,(PVMCPUCC pVCpu));
|
---|
1711 | IEM_DECL_NATIVE_HLP_PROTO(int, iemNativeHlpExecRaiseDe,(PVMCPUCC pVCpu));
|
---|
1712 | IEM_DECL_NATIVE_HLP_PROTO(int, iemNativeHlpObsoleteTb,(PVMCPUCC pVCpu));
|
---|
1713 | IEM_DECL_NATIVE_HLP_PROTO(int, iemNativeHlpNeedCsLimChecking,(PVMCPUCC pVCpu));
|
---|
1714 | IEM_DECL_NATIVE_HLP_PROTO(int, iemNativeHlpCheckBranchMiss,(PVMCPUCC pVCpu));
|
---|
1715 |
|
---|
1716 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFetchDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1717 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFetchDataU8_Sx_U16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1718 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFetchDataU8_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1719 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFetchDataU8_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1720 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFetchDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1721 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFetchDataU16_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1722 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFetchDataU16_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1723 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFetchDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1724 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFetchDataU32_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1725 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFetchDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1726 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1727 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFetchDataU128,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT128U pu128Dst));
|
---|
1728 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFetchDataU128AlignedSse,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT128U pu128Dst));
|
---|
1729 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFetchDataU128NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT128U pu128Dst));
|
---|
1730 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFetchDataU256NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT256U pu256Dst));
|
---|
1731 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFetchDataU256AlignedAvx,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT256U pu256Dst));
|
---|
1732 | #endif
|
---|
1733 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemStoreDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint8_t u8Value));
|
---|
1734 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemStoreDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint16_t u16Value));
|
---|
1735 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemStoreDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint32_t u32Value));
|
---|
1736 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemStoreDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint64_t u64Value));
|
---|
1737 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1738 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemStoreDataU128AlignedSse,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PCRTUINT128U pu128Src));
|
---|
1739 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemStoreDataU128NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PCRTUINT128U pu128Src));
|
---|
1740 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemStoreDataU256NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PCRTUINT256U pu256Src));
|
---|
1741 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemStoreDataU256AlignedAvx,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PCRTUINT256U pu256Src));
|
---|
1742 | #endif
|
---|
1743 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpStackStoreU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value));
|
---|
1744 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpStackStoreU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value));
|
---|
1745 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpStackStoreU32SReg,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value));
|
---|
1746 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpStackStoreU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value));
|
---|
1747 | IEM_DECL_NATIVE_HLP_PROTO(uint16_t, iemNativeHlpStackFetchU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1748 | IEM_DECL_NATIVE_HLP_PROTO(uint32_t, iemNativeHlpStackFetchU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1749 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpStackFetchU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1750 |
|
---|
1751 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFlatFetchDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1752 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFlatFetchDataU8_Sx_U16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1753 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFlatFetchDataU8_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1754 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFlatFetchDataU8_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1755 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFlatFetchDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1756 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFlatFetchDataU16_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1757 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFlatFetchDataU16_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1758 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFlatFetchDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1759 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFlatFetchDataU32_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1760 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpMemFlatFetchDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1761 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1762 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatFetchDataU128,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT128U pu128Dst));
|
---|
1763 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatFetchDataU128AlignedSse,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT128U pu128Dst));
|
---|
1764 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatFetchDataU128NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT128U pu128Dst));
|
---|
1765 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatFetchDataU256NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT256U pu256Dst));
|
---|
1766 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatFetchDataU256AlignedAvx,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT256U pu256Dst));
|
---|
1767 | #endif
|
---|
1768 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatStoreDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t u8Value));
|
---|
1769 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatStoreDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value));
|
---|
1770 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatStoreDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value));
|
---|
1771 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatStoreDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value));
|
---|
1772 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1773 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatStoreDataU128AlignedSse,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PCRTUINT128U pu128Src));
|
---|
1774 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatStoreDataU128NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PCRTUINT128U pu128Src));
|
---|
1775 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatStoreDataU256NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PCRTUINT256U pu256Src));
|
---|
1776 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemFlatStoreDataU256AlignedAvx,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PCRTUINT256U pu256Src));
|
---|
1777 | #endif
|
---|
1778 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpStackFlatStoreU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value));
|
---|
1779 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpStackFlatStoreU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value));
|
---|
1780 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpStackFlatStoreU32SReg,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value));
|
---|
1781 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpStackFlatStoreU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value));
|
---|
1782 | IEM_DECL_NATIVE_HLP_PROTO(uint16_t, iemNativeHlpStackFlatFetchU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1783 | IEM_DECL_NATIVE_HLP_PROTO(uint32_t, iemNativeHlpStackFlatFetchU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1784 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t, iemNativeHlpStackFlatFetchU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem));
|
---|
1785 |
|
---|
1786 | IEM_DECL_NATIVE_HLP_PROTO(uint8_t *, iemNativeHlpMemMapDataU8Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1787 | IEM_DECL_NATIVE_HLP_PROTO(uint8_t *, iemNativeHlpMemMapDataU8Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1788 | IEM_DECL_NATIVE_HLP_PROTO(uint8_t *, iemNativeHlpMemMapDataU8Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1789 | IEM_DECL_NATIVE_HLP_PROTO(uint8_t const *, iemNativeHlpMemMapDataU8Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1790 | IEM_DECL_NATIVE_HLP_PROTO(uint16_t *, iemNativeHlpMemMapDataU16Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1791 | IEM_DECL_NATIVE_HLP_PROTO(uint16_t *, iemNativeHlpMemMapDataU16Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1792 | IEM_DECL_NATIVE_HLP_PROTO(uint16_t *, iemNativeHlpMemMapDataU16Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1793 | IEM_DECL_NATIVE_HLP_PROTO(uint16_t const *, iemNativeHlpMemMapDataU16Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1794 | IEM_DECL_NATIVE_HLP_PROTO(uint32_t *, iemNativeHlpMemMapDataU32Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1795 | IEM_DECL_NATIVE_HLP_PROTO(uint32_t *, iemNativeHlpMemMapDataU32Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1796 | IEM_DECL_NATIVE_HLP_PROTO(uint32_t *, iemNativeHlpMemMapDataU32Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1797 | IEM_DECL_NATIVE_HLP_PROTO(uint32_t const *, iemNativeHlpMemMapDataU32Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1798 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t *, iemNativeHlpMemMapDataU64Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1799 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t *, iemNativeHlpMemMapDataU64Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1800 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t *, iemNativeHlpMemMapDataU64Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1801 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t const *, iemNativeHlpMemMapDataU64Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1802 | IEM_DECL_NATIVE_HLP_PROTO(RTFLOAT80U *, iemNativeHlpMemMapDataR80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1803 | IEM_DECL_NATIVE_HLP_PROTO(RTPBCD80U *, iemNativeHlpMemMapDataD80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1804 | IEM_DECL_NATIVE_HLP_PROTO(RTUINT128U *, iemNativeHlpMemMapDataU128Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1805 | IEM_DECL_NATIVE_HLP_PROTO(RTUINT128U *, iemNativeHlpMemMapDataU128Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1806 | IEM_DECL_NATIVE_HLP_PROTO(RTUINT128U *, iemNativeHlpMemMapDataU128Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1807 | IEM_DECL_NATIVE_HLP_PROTO(RTUINT128U const *, iemNativeHlpMemMapDataU128Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem, uint8_t iSegReg));
|
---|
1808 |
|
---|
1809 | IEM_DECL_NATIVE_HLP_PROTO(uint8_t *, iemNativeHlpMemFlatMapDataU8Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1810 | IEM_DECL_NATIVE_HLP_PROTO(uint8_t *, iemNativeHlpMemFlatMapDataU8Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1811 | IEM_DECL_NATIVE_HLP_PROTO(uint8_t *, iemNativeHlpMemFlatMapDataU8Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1812 | IEM_DECL_NATIVE_HLP_PROTO(uint8_t const *, iemNativeHlpMemFlatMapDataU8Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1813 | IEM_DECL_NATIVE_HLP_PROTO(uint16_t *, iemNativeHlpMemFlatMapDataU16Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1814 | IEM_DECL_NATIVE_HLP_PROTO(uint16_t *, iemNativeHlpMemFlatMapDataU16Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1815 | IEM_DECL_NATIVE_HLP_PROTO(uint16_t *, iemNativeHlpMemFlatMapDataU16Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1816 | IEM_DECL_NATIVE_HLP_PROTO(uint16_t const *, iemNativeHlpMemFlatMapDataU16Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1817 | IEM_DECL_NATIVE_HLP_PROTO(uint32_t *, iemNativeHlpMemFlatMapDataU32Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1818 | IEM_DECL_NATIVE_HLP_PROTO(uint32_t *, iemNativeHlpMemFlatMapDataU32Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1819 | IEM_DECL_NATIVE_HLP_PROTO(uint32_t *, iemNativeHlpMemFlatMapDataU32Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1820 | IEM_DECL_NATIVE_HLP_PROTO(uint32_t const *, iemNativeHlpMemFlatMapDataU32Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1821 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t *, iemNativeHlpMemFlatMapDataU64Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1822 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t *, iemNativeHlpMemFlatMapDataU64Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1823 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t *, iemNativeHlpMemFlatMapDataU64Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1824 | IEM_DECL_NATIVE_HLP_PROTO(uint64_t const *, iemNativeHlpMemFlatMapDataU64Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1825 | IEM_DECL_NATIVE_HLP_PROTO(RTFLOAT80U *, iemNativeHlpMemFlatMapDataR80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1826 | IEM_DECL_NATIVE_HLP_PROTO(RTPBCD80U *, iemNativeHlpMemFlatMapDataD80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1827 | IEM_DECL_NATIVE_HLP_PROTO(RTUINT128U *, iemNativeHlpMemFlatMapDataU128Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1828 | IEM_DECL_NATIVE_HLP_PROTO(RTUINT128U *, iemNativeHlpMemFlatMapDataU128Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1829 | IEM_DECL_NATIVE_HLP_PROTO(RTUINT128U *, iemNativeHlpMemFlatMapDataU128Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1830 | IEM_DECL_NATIVE_HLP_PROTO(RTUINT128U const *, iemNativeHlpMemFlatMapDataU128Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem));
|
---|
1831 |
|
---|
1832 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemCommitAndUnmapAtomic,(PVMCPUCC pVCpu, uint8_t bUnmapInfo));
|
---|
1833 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemCommitAndUnmapRw,(PVMCPUCC pVCpu, uint8_t bUnmapInfo));
|
---|
1834 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemCommitAndUnmapWo,(PVMCPUCC pVCpu, uint8_t bUnmapInfo));
|
---|
1835 | IEM_DECL_NATIVE_HLP_PROTO(void, iemNativeHlpMemCommitAndUnmapRo,(PVMCPUCC pVCpu, uint8_t bUnmapInfo));
|
---|
1836 |
|
---|
1837 |
|
---|
1838 | /**
|
---|
1839 | * Info about shadowed guest register values.
|
---|
1840 | * @see IEMNATIVEGSTREG
|
---|
1841 | */
|
---|
1842 | typedef struct IEMANTIVEGSTREGINFO
|
---|
1843 | {
|
---|
1844 | /** Offset in VMCPU. */
|
---|
1845 | uint32_t off;
|
---|
1846 | /** The field size. */
|
---|
1847 | uint8_t cb;
|
---|
1848 | /** Name (for logging). */
|
---|
1849 | const char *pszName;
|
---|
1850 | } IEMANTIVEGSTREGINFO;
|
---|
1851 | extern DECL_HIDDEN_DATA(IEMANTIVEGSTREGINFO const) g_aGstShadowInfo[];
|
---|
1852 | extern DECL_HIDDEN_DATA(const char * const) g_apszIemNativeHstRegNames[];
|
---|
1853 | extern DECL_HIDDEN_DATA(int32_t const) g_aoffIemNativeCallStackArgBpDisp[];
|
---|
1854 | extern DECL_HIDDEN_DATA(uint32_t const) g_afIemNativeCallRegs[];
|
---|
1855 | extern DECL_HIDDEN_DATA(uint8_t const) g_aidxIemNativeCallRegs[];
|
---|
1856 |
|
---|
1857 |
|
---|
1858 |
|
---|
1859 | /**
|
---|
1860 | * Ensures that there is sufficient space in the instruction output buffer.
|
---|
1861 | *
|
---|
1862 | * This will reallocate the buffer if needed and allowed.
|
---|
1863 | *
|
---|
1864 | * @note Always use IEMNATIVE_ASSERT_INSTR_BUF_ENSURE when done to check the
|
---|
1865 | * allocation size.
|
---|
1866 | *
|
---|
1867 | * @returns Pointer to the instruction output buffer on success; throws VBox
|
---|
1868 | * status code on failure, so no need to check it.
|
---|
1869 | * @param pReNative The native recompile state.
|
---|
1870 | * @param off Current instruction offset. Works safely for UINT32_MAX
|
---|
1871 | * as well.
|
---|
1872 | * @param cInstrReq Number of instructions about to be added. It's okay to
|
---|
1873 | * overestimate this a bit.
|
---|
1874 | */
|
---|
1875 | DECL_FORCE_INLINE_THROW(PIEMNATIVEINSTR)
|
---|
1876 | iemNativeInstrBufEnsure(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq)
|
---|
1877 | {
|
---|
1878 | uint64_t const offChecked = off + (uint64_t)cInstrReq; /** @todo may reconsider the need for UINT32_MAX safety... */
|
---|
1879 | if (RT_LIKELY(offChecked <= pReNative->cInstrBufAlloc))
|
---|
1880 | {
|
---|
1881 | #ifdef VBOX_STRICT
|
---|
1882 | pReNative->offInstrBufChecked = offChecked;
|
---|
1883 | #endif
|
---|
1884 | return pReNative->pInstrBuf;
|
---|
1885 | }
|
---|
1886 | return iemNativeInstrBufEnsureSlow(pReNative, off, cInstrReq);
|
---|
1887 | }
|
---|
1888 |
|
---|
1889 | /**
|
---|
1890 | * Checks that we didn't exceed the space requested in the last
|
---|
1891 | * iemNativeInstrBufEnsure() call.
|
---|
1892 | */
|
---|
1893 | #define IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(a_pReNative, a_off) \
|
---|
1894 | AssertMsg((a_off) <= (a_pReNative)->offInstrBufChecked, \
|
---|
1895 | ("off=%#x offInstrBufChecked=%#x\n", (a_off), (a_pReNative)->offInstrBufChecked))
|
---|
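/*
 * Typical emit pattern (illustrative only; the instruction values are dummies):
 *
 *      PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
 *      pCodeBuf[off++] = ...;   // first instruction
 *      pCodeBuf[off++] = ...;   // second instruction
 *      IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
 */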
1896 |
|
---|
/**
 * Checks that a variable index is valid.
 */
#ifdef IEMNATIVE_VAR_IDX_MAGIC
# define IEMNATIVE_ASSERT_VAR_IDX(a_pReNative, a_idxVar) \
    AssertMsg(   ((a_idxVar) & IEMNATIVE_VAR_IDX_MAGIC_MASK) == IEMNATIVE_VAR_IDX_MAGIC \
              && (unsigned)IEMNATIVE_VAR_IDX_UNPACK(a_idxVar) < RT_ELEMENTS((a_pReNative)->Core.aVars) \
              && ((a_pReNative)->Core.bmVars & RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(a_idxVar))), \
              ("%s=%#x\n", #a_idxVar, a_idxVar))
#else
# define IEMNATIVE_ASSERT_VAR_IDX(a_pReNative, a_idxVar) \
    AssertMsg(   (unsigned)(a_idxVar) < RT_ELEMENTS((a_pReNative)->Core.aVars) \
              && ((a_pReNative)->Core.bmVars & RT_BIT_32(a_idxVar)), ("%s=%d\n", #a_idxVar, a_idxVar))
#endif

/**
 * Checks that a variable index is valid and that the variable is assigned the
 * correct argument number.
 * This also adds a RT_NOREF of a_idxVar.
 */
#ifdef IEMNATIVE_VAR_IDX_MAGIC
# define IEMNATIVE_ASSERT_ARG_VAR_IDX(a_pReNative, a_idxVar, a_uArgNo) do { \
        RT_NOREF_PV(a_idxVar); \
        AssertMsg(   ((a_idxVar) & IEMNATIVE_VAR_IDX_MAGIC_MASK) == IEMNATIVE_VAR_IDX_MAGIC \
                  && (unsigned)IEMNATIVE_VAR_IDX_UNPACK(a_idxVar) < RT_ELEMENTS((a_pReNative)->Core.aVars) \
                  && ((a_pReNative)->Core.bmVars & RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(a_idxVar))) \
                  && (a_pReNative)->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVar)].uArgNo == (a_uArgNo), \
                  ("%s=%d; uArgNo=%d, expected %u\n", #a_idxVar, a_idxVar, \
                   (a_pReNative)->Core.aVars[RT_MIN(IEMNATIVE_VAR_IDX_UNPACK(a_idxVar), \
                                                    RT_ELEMENTS((a_pReNative)->Core.aVars)) - 1].uArgNo, \
                   a_uArgNo)); \
    } while (0)
#else
# define IEMNATIVE_ASSERT_ARG_VAR_IDX(a_pReNative, a_idxVar, a_uArgNo) do { \
        RT_NOREF_PV(a_idxVar); \
        AssertMsg(   (unsigned)(a_idxVar) < RT_ELEMENTS((a_pReNative)->Core.aVars) \
                  && ((a_pReNative)->Core.bmVars & RT_BIT_32(a_idxVar)) \
                  && (a_pReNative)->Core.aVars[a_idxVar].uArgNo == (a_uArgNo), \
                  ("%s=%d; uArgNo=%d, expected %u\n", #a_idxVar, a_idxVar, \
                   (a_pReNative)->Core.aVars[RT_MIN(a_idxVar, RT_ELEMENTS((a_pReNative)->Core.aVars)) - 1].uArgNo, a_uArgNo)); \
    } while (0)
#endif


/**
 * Checks that a variable has the expected size.
 */
#define IEMNATIVE_ASSERT_VAR_SIZE(a_pReNative, a_idxVar, a_cbVar) \
    AssertMsg((a_pReNative)->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVar)].cbVar == (a_cbVar), \
              ("%s=%#x: cbVar=%#x, expected %#x!\n", #a_idxVar, a_idxVar, \
               (a_pReNative)->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVar)].cbVar, (a_cbVar)))


/**
 * Calculates the stack address of a variable as a [r]BP displacement value.
 */
DECL_FORCE_INLINE(int32_t)
iemNativeStackCalcBpDisp(uint8_t idxStackSlot)
{
    Assert(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS);
    return idxStackSlot * sizeof(uint64_t) + IEMNATIVE_FP_OFF_STACK_VARS;
}

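/* Editor's note (illustrative, not from the original source): each stack slot
 * is 8 bytes, so the displacement is simply IEMNATIVE_FP_OFF_STACK_VARS plus
 * idxStackSlot * 8.  A quick sanity check of that arithmetic:
 *
 * @code
 *  int32_t const offSlot0 = iemNativeStackCalcBpDisp(0); // == IEMNATIVE_FP_OFF_STACK_VARS
 *  int32_t const offSlot3 = iemNativeStackCalcBpDisp(3); // == IEMNATIVE_FP_OFF_STACK_VARS + 24
 *  Assert(offSlot3 - offSlot0 == 3 * (int32_t)sizeof(uint64_t));
 * @endcode
 */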

/**
 * Releases the variable's register.
 *
 * The register must have been previously acquired calling
 * iemNativeVarRegisterAcquire(), iemNativeVarRegisterAcquireForGuestReg() or
 * iemNativeVarRegisterSetAndAcquire().
 */
DECL_INLINE_THROW(void) iemNativeVarRegisterRelease(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fRegAcquired);
    pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fRegAcquired = false;
}


#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
DECL_INLINE_THROW(void) iemNativeVarSimdRegisterRelease(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
{
    Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg);
    iemNativeVarRegisterRelease(pReNative, idxVar);
}
#endif


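/* Editor's illustrative sketch (not part of the original header): the usual
 * pairing is acquire -> emit code using the host register -> release.  The
 * exact iemNativeVarRegisterAcquire() signature is assumed here; the body is
 * schematic only.
 *
 * @code
 *  uint8_t const idxReg = iemNativeVarRegisterAcquire(pReNative, idxVar, &off); // assumed signature
 *  // ... emit native instructions that use idxReg ...
 *  iemNativeVarRegisterRelease(pReNative, idxVar);
 * @endcode
 */
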
/**
 * Converts IEM_CIMPL_F_XXX flags into a guest register shadow copy flush mask.
 *
 * @returns The flush mask.
 * @param   fCImpl          The IEM_CIMPL_F_XXX flags.
 * @param   fGstShwFlush    The starting flush mask.
 */
DECL_FORCE_INLINE(uint64_t) iemNativeCImplFlagsToGuestShadowFlushMask(uint32_t fCImpl, uint64_t fGstShwFlush)
{
    if (fCImpl & IEM_CIMPL_F_BRANCH_FAR)
        fGstShwFlush |= RT_BIT_64(kIemNativeGstReg_SegSelFirst   + X86_SREG_CS)
                     |  RT_BIT_64(kIemNativeGstReg_SegBaseFirst  + X86_SREG_CS)
                     |  RT_BIT_64(kIemNativeGstReg_SegLimitFirst + X86_SREG_CS);
    if (fCImpl & IEM_CIMPL_F_BRANCH_STACK_FAR)
        fGstShwFlush |= RT_BIT_64(kIemNativeGstReg_GprFirst      + X86_GREG_xSP)
                     |  RT_BIT_64(kIemNativeGstReg_SegSelFirst   + X86_SREG_SS)
                     |  RT_BIT_64(kIemNativeGstReg_SegBaseFirst  + X86_SREG_SS)
                     |  RT_BIT_64(kIemNativeGstReg_SegLimitFirst + X86_SREG_SS);
    else if (fCImpl & IEM_CIMPL_F_BRANCH_STACK)
        fGstShwFlush |= RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xSP);
    if (fCImpl & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_STATUS_FLAGS | IEM_CIMPL_F_INHIBIT_SHADOW))
        fGstShwFlush |= RT_BIT_64(kIemNativeGstReg_EFlags);
    return fGstShwFlush;
}

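/* Editor's illustrative sketch (not part of the original header): a CImpl call
 * that performs a far branch and modifies RFLAGS would extend its flush mask
 * like this; the starting mask (here RAX) is just an arbitrary example.
 *
 * @code
 *  uint64_t fGstShwFlush = RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xAX);
 *  fGstShwFlush = iemNativeCImplFlagsToGuestShadowFlushMask(IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_RFLAGS,
 *                                                           fGstShwFlush);
 *  // fGstShwFlush now also covers CS.sel/base/limit and EFLAGS.
 * @endcode
 */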

/** Number of hidden arguments for CIMPL calls.
 * @note We're suffering from the usual VBOXSTRICTRC fun on Windows. */
#if defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
# define IEM_CIMPL_HIDDEN_ARGS 3
#else
# define IEM_CIMPL_HIDDEN_ARGS 2
#endif


#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/** Number of hidden arguments for SSE_AIMPL calls. */
# define IEM_SSE_AIMPL_HIDDEN_ARGS 1
/** Number of hidden arguments for AVX_AIMPL calls. */
# define IEM_AVX_AIMPL_HIDDEN_ARGS 1
#endif


#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS

# ifndef IEMLIVENESS_EXTENDED_LAYOUT
/**
 * Helper for iemNativeLivenessGetStateByGstReg.
 *
 * @returns IEMLIVENESS_STATE_XXX
 * @param   fMergedStateExp2    This is the RT_BIT_32() of each sub-state
 *                              ORed together.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeLivenessMergeExpandedEFlagsState(uint32_t fMergedStateExp2)
{
    /* INPUT trumps anything else. */
    if (fMergedStateExp2 & RT_BIT_32(IEMLIVENESS_STATE_INPUT))
        return IEMLIVENESS_STATE_INPUT;

    /* CLOBBERED trumps XCPT_OR_CALL and UNUSED. */
    if (fMergedStateExp2 & RT_BIT_32(IEMLIVENESS_STATE_CLOBBERED))
    {
        /* If not all sub-fields are clobbered they must be considered INPUT. */
        if (fMergedStateExp2 & (RT_BIT_32(IEMLIVENESS_STATE_UNUSED) | RT_BIT_32(IEMLIVENESS_STATE_XCPT_OR_CALL)))
            return IEMLIVENESS_STATE_INPUT;
        return IEMLIVENESS_STATE_CLOBBERED;
    }

    /* XCPT_OR_CALL trumps UNUSED. */
    if (fMergedStateExp2 & RT_BIT_32(IEMLIVENESS_STATE_XCPT_OR_CALL))
        return IEMLIVENESS_STATE_XCPT_OR_CALL;

    return IEMLIVENESS_STATE_UNUSED;
}
# endif /* !IEMLIVENESS_EXTENDED_LAYOUT */


DECL_FORCE_INLINE(uint32_t)
iemNativeLivenessGetStateByGstRegEx(PCIEMLIVENESSENTRY pLivenessEntry, unsigned enmGstRegEx)
{
# ifndef IEMLIVENESS_EXTENDED_LAYOUT
    return ((pLivenessEntry->Bit0.bm64 >> enmGstRegEx) & 1)
         | (((pLivenessEntry->Bit1.bm64 >> enmGstRegEx) << 1) & 2);
# else
    return ( (pLivenessEntry->Bit0.bm64 >> enmGstRegEx) & 1)
         | (((pLivenessEntry->Bit1.bm64 >> enmGstRegEx) << 1) & 2)
         | (((pLivenessEntry->Bit2.bm64 >> enmGstRegEx) << 2) & 4)
         | (((pLivenessEntry->Bit3.bm64 >> enmGstRegEx) << 3) & 8);
# endif
}


DECL_FORCE_INLINE(uint32_t)
iemNativeLivenessGetStateByGstReg(PCIEMLIVENESSENTRY pLivenessEntry, IEMNATIVEGSTREG enmGstReg)
{
    uint32_t uRet = iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, (unsigned)enmGstReg);
    if (enmGstReg == kIemNativeGstReg_EFlags)
    {
        /* Merge the eflags states to one. */
# ifndef IEMLIVENESS_EXTENDED_LAYOUT
        uRet  = RT_BIT_32(uRet);
        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflCf | (pLivenessEntry->Bit1.fEflCf << 1));
        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflPf | (pLivenessEntry->Bit1.fEflPf << 1));
        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflAf | (pLivenessEntry->Bit1.fEflAf << 1));
        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflZf | (pLivenessEntry->Bit1.fEflZf << 1));
        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflSf | (pLivenessEntry->Bit1.fEflSf << 1));
        uRet |= RT_BIT_32(pLivenessEntry->Bit0.fEflOf | (pLivenessEntry->Bit1.fEflOf << 1));
        uRet  = iemNativeLivenessMergeExpandedEFlagsState(uRet);
# else
        AssertCompile(IEMLIVENESSBIT_IDX_EFL_OTHER == (unsigned)kIemNativeGstReg_EFlags);
        uRet |= iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, IEMLIVENESSBIT_IDX_EFL_CF);
        uRet |= iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, IEMLIVENESSBIT_IDX_EFL_PF);
        uRet |= iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, IEMLIVENESSBIT_IDX_EFL_AF);
        uRet |= iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, IEMLIVENESSBIT_IDX_EFL_ZF);
        uRet |= iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, IEMLIVENESSBIT_IDX_EFL_SF);
        uRet |= iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, IEMLIVENESSBIT_IDX_EFL_OF);
# endif
    }
    return uRet;
}


# ifdef VBOX_STRICT
/** For assertions only; the caller must check that idxCurCall isn't zero. */
DECL_FORCE_INLINE(uint32_t)
iemNativeLivenessGetPrevStateByGstReg(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg)
{
    return iemNativeLivenessGetStateByGstReg(&pReNative->paLivenessEntries[pReNative->idxCurCall - 1], enmGstReg);
}
# endif /* VBOX_STRICT */

#endif /* IEMNATIVE_WITH_LIVENESS_ANALYSIS */


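/* Editor's illustrative sketch (not part of the original header): querying the
 * liveness state to decide whether a guest register is still needed, e.g. when
 * considering an EFLAGS update skip.  Whether the relevant entry is indexed by
 * idxCurCall or an adjusted index is an assumption here; the check is schematic.
 *
 * @code
 *  #ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
 *      PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[pReNative->idxCurCall];
 *      uint32_t const           enmState       = iemNativeLivenessGetStateByGstReg(pLivenessEntry,
 *                                                                                  kIemNativeGstReg_EFlags);
 *      if (enmState == IEMLIVENESS_STATE_CLOBBERED)
 *      {
 *          // EFLAGS is overwritten before being read again, so the update
 *          // could be skipped (cf. IEMNATIVE_WITH_EFLAGS_SKIPPING).
 *      }
 *  #endif
 * @endcode
 */
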
/**
 * Gets the number of hidden arguments for an expected IEM_MC_CALL statement.
 */
DECL_FORCE_INLINE(uint8_t) iemNativeArgGetHiddenArgCount(PIEMRECOMPILERSTATE pReNative)
{
    if (pReNative->fCImpl & IEM_CIMPL_F_CALLS_CIMPL)
        return IEM_CIMPL_HIDDEN_ARGS;
    if (pReNative->fCImpl & (IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE | IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE))
        return 1;
    return 0;
}


DECL_FORCE_INLINE(uint8_t) iemNativeRegMarkAllocated(PIEMRECOMPILERSTATE pReNative, unsigned idxReg,
                                                     IEMNATIVEWHAT enmWhat, uint8_t idxVar = UINT8_MAX) RT_NOEXCEPT
{
    pReNative->Core.bmHstRegs |= RT_BIT_32(idxReg);

    pReNative->Core.aHstRegs[idxReg].enmWhat        = enmWhat;
    pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
    pReNative->Core.aHstRegs[idxReg].idxVar         = idxVar;
    return (uint8_t)idxReg;
}



/*********************************************************************************************************************************
*   Register Allocator (GPR)                                                                                                     *
*********************************************************************************************************************************/

/**
 * Marks host register @a idxHstReg as containing a shadow copy of guest
 * register @a enmGstReg.
 *
 * ASSUMES that caller has made sure @a enmGstReg is not associated with any
 * host register before calling.
 */
DECL_FORCE_INLINE(void)
iemNativeRegMarkAsGstRegShadow(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg, uint32_t off)
{
    Assert(!(pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg)));
    Assert(!pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows);
    Assert((unsigned)enmGstReg < (unsigned)kIemNativeGstReg_End);

    pReNative->Core.aidxGstRegShadows[enmGstReg]        = idxHstReg;
    pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows  = RT_BIT_64(enmGstReg); /** @todo why? not OR? */
    pReNative->Core.bmGstRegShadows                    |= RT_BIT_64(enmGstReg);
    pReNative->Core.bmHstRegsWithGstShadow             |= RT_BIT_32(idxHstReg);
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    iemNativeDbgInfoAddNativeOffset(pReNative, off);
    iemNativeDbgInfoAddGuestRegShadowing(pReNative, enmGstReg, idxHstReg);
#else
    RT_NOREF(off);
#endif
}


/**
 * Clear any guest register shadow claims from @a idxHstReg.
 *
 * The register does not need to be shadowing any guest registers.
 */
DECL_FORCE_INLINE(void)
iemNativeRegClearGstRegShadowing(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, uint32_t off)
{
    Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows)
           == pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows
           && pReNative->Core.bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
    Assert(   RT_BOOL(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg))
           == RT_BOOL(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    Assert(!(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & pReNative->Core.bmGstRegShadowDirty));
#endif

#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    uint64_t fGstRegs = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
    if (fGstRegs)
    {
        Assert(fGstRegs < RT_BIT_64(kIemNativeGstReg_End));
        iemNativeDbgInfoAddNativeOffset(pReNative, off);
        while (fGstRegs)
        {
            unsigned const iGstReg = ASMBitFirstSetU64(fGstRegs) - 1;
            fGstRegs &= ~RT_BIT_64(iGstReg);
            iemNativeDbgInfoAddGuestRegShadowing(pReNative, (IEMNATIVEGSTREG)iGstReg, UINT8_MAX, idxHstReg);
        }
    }
#else
    RT_NOREF(off);
#endif

    pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
    pReNative->Core.bmGstRegShadows        &= ~pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
    pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
}


/**
 * Clear guest register shadow claim regarding @a enmGstReg from @a idxHstReg
 * and global overview flags.
 */
DECL_FORCE_INLINE(void)
iemNativeRegClearGstRegShadowingOne(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg, uint32_t off)
{
    Assert(pReNative->Core.bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
    Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows)
           == pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows
           && pReNative->Core.bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
    Assert(pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg));
    Assert(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & RT_BIT_64(enmGstReg));
    Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg));
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    Assert(!(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & pReNative->Core.bmGstRegShadowDirty));
#endif

#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    iemNativeDbgInfoAddNativeOffset(pReNative, off);
    iemNativeDbgInfoAddGuestRegShadowing(pReNative, enmGstReg, UINT8_MAX, idxHstReg);
#else
    RT_NOREF(off);
#endif

    uint64_t const fGstRegShadowsNew = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & ~RT_BIT_64(enmGstReg);
    pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = fGstRegShadowsNew;
    if (!fGstRegShadowsNew)
        pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
    pReNative->Core.bmGstRegShadows &= ~RT_BIT_64(enmGstReg);
}


#if 0 /* unused */
/**
 * Clear any guest register shadow claim for @a enmGstReg.
 */
DECL_FORCE_INLINE(void)
iemNativeRegClearGstRegShadowingByGstReg(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg, uint32_t off)
{
    Assert(pReNative->Core.bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
    if (pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg))
    {
        Assert(pReNative->Core.aidxGstRegShadows[enmGstReg] < RT_ELEMENTS(pReNative->Core.aHstRegs));
        iemNativeRegClearGstRegShadowingOne(pReNative, pReNative->Core.aidxGstRegShadows[enmGstReg], enmGstReg, off);
    }
}
#endif


/**
 * Clear any guest register shadow claim for @a enmGstReg and mark @a idxHstRegNew
 * as the new shadow of it.
 *
 * Unlike the other guest reg shadow helpers, this does the logging for you.
 * However, the liveness state is not asserted here; the caller must do that.
 */
DECL_FORCE_INLINE(void)
iemNativeRegClearAndMarkAsGstRegShadow(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstRegNew,
                                       IEMNATIVEGSTREG enmGstReg, uint32_t off)
{
    Assert(pReNative->Core.bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
    if (pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg))
    {
        uint8_t const idxHstRegOld = pReNative->Core.aidxGstRegShadows[enmGstReg];
        Assert(idxHstRegOld < RT_ELEMENTS(pReNative->Core.aHstRegs));
        if (idxHstRegOld == idxHstRegNew)
            return;
        Log12(("iemNativeRegClearAndMarkAsGstRegShadow: %s for guest %s (from %s)\n", g_apszIemNativeHstRegNames[idxHstRegNew],
               g_aGstShadowInfo[enmGstReg].pszName, g_apszIemNativeHstRegNames[idxHstRegOld]));
        iemNativeRegClearGstRegShadowingOne(pReNative, pReNative->Core.aidxGstRegShadows[enmGstReg], enmGstReg, off);
    }
    else
        Log12(("iemNativeRegClearAndMarkAsGstRegShadow: %s for guest %s\n", g_apszIemNativeHstRegNames[idxHstRegNew],
               g_aGstShadowInfo[enmGstReg].pszName));
    iemNativeRegMarkAsGstRegShadow(pReNative, idxHstRegNew, enmGstReg, off);
}


/**
 * Transfers the guest register shadow claims of @a enmGstReg from @a idxRegFrom
 * to @a idxRegTo.
 */
DECL_FORCE_INLINE(void)
iemNativeRegTransferGstRegShadowing(PIEMRECOMPILERSTATE pReNative, uint8_t idxRegFrom, uint8_t idxRegTo,
                                    IEMNATIVEGSTREG enmGstReg, uint32_t off)
{
    Assert(pReNative->Core.aHstRegs[idxRegFrom].fGstRegShadows & RT_BIT_64(enmGstReg));
    Assert(pReNative->Core.aidxGstRegShadows[enmGstReg] == idxRegFrom);
    Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxRegFrom].fGstRegShadows)
           == pReNative->Core.aHstRegs[idxRegFrom].fGstRegShadows
           && pReNative->Core.bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
    Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxRegTo].fGstRegShadows)
           == pReNative->Core.aHstRegs[idxRegTo].fGstRegShadows);
    Assert(   RT_BOOL(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxRegFrom))
           == RT_BOOL(pReNative->Core.aHstRegs[idxRegFrom].fGstRegShadows));

    uint64_t const fGstRegShadowsFrom = pReNative->Core.aHstRegs[idxRegFrom].fGstRegShadows & ~RT_BIT_64(enmGstReg);
    pReNative->Core.aHstRegs[idxRegFrom].fGstRegShadows = fGstRegShadowsFrom;
    if (!fGstRegShadowsFrom)
        pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxRegFrom);
    pReNative->Core.bmHstRegsWithGstShadow             |= RT_BIT_32(idxRegTo);
    pReNative->Core.aHstRegs[idxRegTo].fGstRegShadows  |= RT_BIT_64(enmGstReg);
    pReNative->Core.aidxGstRegShadows[enmGstReg]        = idxRegTo;
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    iemNativeDbgInfoAddNativeOffset(pReNative, off);
    iemNativeDbgInfoAddGuestRegShadowing(pReNative, enmGstReg, idxRegTo, idxRegFrom);
#else
    RT_NOREF(off);
#endif
}


/**
 * Flushes any delayed guest register writes.
 *
 * This must be called prior to calling CImpl functions and other helpers that
 * use the guest state (like raising exceptions).
 *
 * This optimization has not yet been implemented.  The first target would be
 * RIP updates, since these are the most common ones.
 *
 * @note This function does not flush any shadowing information for guest
 *       registers.  This needs to be done by the caller if it wishes to do so.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeRegFlushPendingWrites(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t fGstShwExcept = 0, uint64_t fGstSimdShwExcept = 0)
{
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    uint64_t const bmGstRegShadowDirty = pReNative->Core.bmGstRegShadowDirty & ~fGstShwExcept;
#else
    uint64_t const bmGstRegShadowDirty = 0;
#endif
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    uint64_t const bmGstSimdRegShadowDirty = (  pReNative->Core.bmGstSimdRegShadowDirtyLo128
                                              | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
                                           & ~fGstSimdShwExcept;
#else
    uint64_t const bmGstSimdRegShadowDirty = 0;
#endif
#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    uint64_t const fWritebackPc = ~(fGstShwExcept & kIemNativeGstReg_Pc);
#else
    uint64_t const fWritebackPc = 0;
#endif
    if (bmGstRegShadowDirty | bmGstSimdRegShadowDirty | fWritebackPc)
        return iemNativeRegFlushPendingWritesSlow(pReNative, off, fGstShwExcept, fGstSimdShwExcept);

    return off;
}



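/* Editor's illustrative sketch (not part of the original header): the typical
 * pattern before invoking a CImpl helper, combining the flush-mask helper above
 * with the pending-write flush.  The surrounding emitter steps are schematic.
 *
 * @code
 *  uint64_t const fGstShwFlush = iemNativeCImplFlagsToGuestShadowFlushMask(fCImpl, 0);
 *  off = iemNativeRegFlushPendingWrites(pReNative, off);
 *  // ... emit the actual call to the CImpl function here ...
 *  // Shadow copies covered by fGstShwFlush must be invalidated separately,
 *  // since iemNativeRegFlushPendingWrites() does not touch shadowing info.
 * @endcode
 */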

/*********************************************************************************************************************************
*   SIMD register allocator (largely code duplication of the GPR allocator for now but might diverge)                            *
*********************************************************************************************************************************/

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR

DECL_FORCE_INLINE(uint8_t)
iemNativeSimdRegMarkAllocated(PIEMRECOMPILERSTATE pReNative, uint8_t idxSimdReg,
                              IEMNATIVEWHAT enmWhat, uint8_t idxVar = UINT8_MAX) RT_NOEXCEPT
{
    pReNative->Core.bmHstSimdRegs |= RT_BIT_32(idxSimdReg);

    pReNative->Core.aHstSimdRegs[idxSimdReg].enmWhat        = enmWhat;
    pReNative->Core.aHstSimdRegs[idxSimdReg].idxVar         = idxVar;
    pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows = 0;
    return idxSimdReg;
}


/**
 * Marks host SIMD register @a idxHstSimdReg as containing a shadow copy of guest
 * SIMD register @a enmGstSimdReg.
 *
 * ASSUMES that caller has made sure @a enmGstSimdReg is not associated with any
 * host register before calling.
 */
DECL_FORCE_INLINE(void)
iemNativeSimdRegMarkAsGstSimdRegShadow(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstSimdReg,
                                       IEMNATIVEGSTSIMDREG enmGstSimdReg, uint32_t off)
{
    Assert(!(pReNative->Core.bmGstSimdRegShadows & RT_BIT_64(enmGstSimdReg)));
    Assert(!pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows);
    Assert((unsigned)enmGstSimdReg < (unsigned)kIemNativeGstSimdReg_End);

    pReNative->Core.aidxGstSimdRegShadows[enmGstSimdReg]        = idxHstSimdReg;
    pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows |= RT_BIT_64(enmGstSimdReg);
    pReNative->Core.bmGstSimdRegShadows                        |= RT_BIT_64(enmGstSimdReg);
    pReNative->Core.bmHstSimdRegsWithGstShadow                 |= RT_BIT_32(idxHstSimdReg);
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    iemNativeDbgInfoAddNativeOffset(pReNative, off);
    iemNativeDbgInfoAddGuestSimdRegShadowing(pReNative, enmGstSimdReg, idxHstSimdReg);
#else
    RT_NOREF(off);
#endif
}


/**
 * Transfers the guest SIMD register shadow claims of @a enmGstSimdReg from @a idxSimdRegFrom
 * to @a idxSimdRegTo.
 */
DECL_FORCE_INLINE(void)
iemNativeSimdRegTransferGstSimdRegShadowing(PIEMRECOMPILERSTATE pReNative, uint8_t idxSimdRegFrom, uint8_t idxSimdRegTo,
                                            IEMNATIVEGSTSIMDREG enmGstSimdReg, uint32_t off)
{
    Assert(pReNative->Core.aHstSimdRegs[idxSimdRegFrom].fGstRegShadows & RT_BIT_64(enmGstSimdReg));
    Assert(pReNative->Core.aidxGstSimdRegShadows[enmGstSimdReg] == idxSimdRegFrom);
    Assert(   (pReNative->Core.bmGstSimdRegShadows & pReNative->Core.aHstSimdRegs[idxSimdRegFrom].fGstRegShadows)
           == pReNative->Core.aHstSimdRegs[idxSimdRegFrom].fGstRegShadows
           && pReNative->Core.bmGstSimdRegShadows < RT_BIT_64(kIemNativeGstReg_End));
    Assert(   (pReNative->Core.bmGstSimdRegShadows & pReNative->Core.aHstSimdRegs[idxSimdRegTo].fGstRegShadows)
           == pReNative->Core.aHstSimdRegs[idxSimdRegTo].fGstRegShadows);
    Assert(   RT_BOOL(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxSimdRegFrom))
           == RT_BOOL(pReNative->Core.aHstSimdRegs[idxSimdRegFrom].fGstRegShadows));
    Assert(   pReNative->Core.aHstSimdRegs[idxSimdRegFrom].enmLoaded
           == pReNative->Core.aHstSimdRegs[idxSimdRegTo].enmLoaded);

    uint64_t const fGstRegShadowsFrom = pReNative->Core.aHstSimdRegs[idxSimdRegFrom].fGstRegShadows & ~RT_BIT_64(enmGstSimdReg);
    pReNative->Core.aHstSimdRegs[idxSimdRegFrom].fGstRegShadows = fGstRegShadowsFrom;
    if (!fGstRegShadowsFrom)
    {
        pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxSimdRegFrom);
        pReNative->Core.aHstSimdRegs[idxSimdRegFrom].enmLoaded = kIemNativeGstSimdRegLdStSz_Invalid;
    }
    pReNative->Core.bmHstSimdRegsWithGstShadow                 |= RT_BIT_32(idxSimdRegTo);
    pReNative->Core.aHstSimdRegs[idxSimdRegTo].fGstRegShadows  |= RT_BIT_64(enmGstSimdReg);
    pReNative->Core.aidxGstSimdRegShadows[enmGstSimdReg]        = idxSimdRegTo;
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    iemNativeDbgInfoAddNativeOffset(pReNative, off);
    iemNativeDbgInfoAddGuestSimdRegShadowing(pReNative, enmGstSimdReg, idxSimdRegTo, idxSimdRegFrom);
#else
    RT_NOREF(off);
#endif
}


/**
 * Clear any guest register shadow claims from @a idxHstSimdReg.
 *
 * The register does not need to be shadowing any guest registers.
 */
DECL_FORCE_INLINE(void)
iemNativeSimdRegClearGstSimdRegShadowing(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstSimdReg, uint32_t off)
{
    Assert(   (pReNative->Core.bmGstSimdRegShadows & pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows)
           == pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows
           && pReNative->Core.bmGstSimdRegShadows < RT_BIT_64(kIemNativeGstSimdReg_End));
    Assert(   RT_BOOL(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxHstSimdReg))
           == RT_BOOL(pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows));
    Assert(   !(pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows & pReNative->Core.bmGstSimdRegShadowDirtyLo128)
           && !(pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows & pReNative->Core.bmGstSimdRegShadowDirtyHi128));

#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    uint64_t fGstRegs = pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows;
    if (fGstRegs)
    {
        Assert(fGstRegs < RT_BIT_64(kIemNativeGstSimdReg_End));
        iemNativeDbgInfoAddNativeOffset(pReNative, off);
        while (fGstRegs)
        {
            unsigned const iGstReg = ASMBitFirstSetU64(fGstRegs) - 1;
            fGstRegs &= ~RT_BIT_64(iGstReg);
            iemNativeDbgInfoAddGuestSimdRegShadowing(pReNative, (IEMNATIVEGSTSIMDREG)iGstReg, UINT8_MAX, idxHstSimdReg);
        }
    }
#else
    RT_NOREF(off);
#endif

    pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxHstSimdReg);
    pReNative->Core.bmGstSimdRegShadows        &= ~pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows;
    pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows = 0;
    pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded      = kIemNativeGstSimdRegLdStSz_Invalid;
}

#endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */


#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
/**
 * Emits code to update the guest RIP value by adding the current offset since the start of the last RIP update.
 */
DECL_INLINE_THROW(uint32_t) iemNativeEmitPcWriteback(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    if (pReNative->Core.offPc)
        return iemNativeEmitPcWritebackSlow(pReNative, off);
    return off;
}
#endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING */


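/* Editor's illustrative sketch (not part of the original header): emitters that
 * need the guest RIP to be up to date (e.g. before a branch or an exception
 * path) call the writeback helper first; the fast path above is a no-op when
 * no RIP delta is pending.  The surrounding emitter steps are schematic only.
 *
 * @code
 *  #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
 *      off = iemNativeEmitPcWriteback(pReNative, off);   // flush any pending RIP delta
 *  #endif
 *  // ... emit code that depends on an up-to-date guest RIP ...
 * @endcode
 */
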
#ifdef IEMNATIVE_WITH_RECOMPILER_PROLOGUE_SINGLETON
/** @note iemNativeTbEntry returns VBOXSTRICTRC, but we don't declare it as
 *        such, since that saves us the trouble of a hidden parameter on MSC/amd64. */
# ifdef RT_ARCH_AMD64
extern "C" IEM_DECL_NATIVE_HLP_DEF(int, iemNativeTbEntry, (PVMCPUCC pVCpu, uintptr_t pfnTbBody));
# elif defined(RT_ARCH_ARM64)
extern "C" IEM_DECL_NATIVE_HLP_DEF(int, iemNativeTbEntry, (PVMCPUCC pVCpu, PCPUMCTX pCpumCtx, uintptr_t pfnTbBody));
# endif
#endif

#endif /* !RT_IN_ASSEMBLER - ASM-NOINC-END */

/** @} */

#endif /* !VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h */