/* $Id: IEMAllN8veRecompFuncs.h 103846 2024-03-14 11:28:41Z vboxsync $ */
/** @file
 * IEM - Native Recompiler - Inlined Bits.
 */

/*
 * Copyright (C) 2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_NATIVE
#define IEM_WITH_OPAQUE_DECODER_STATE
#define VMCPU_INCL_CPUM_GST_CTX
#define VMM_INCLUDED_SRC_include_IEMMc_h /* block IEMMc.h inclusion. */
#define IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/dbgf.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/heap.h>
#include <iprt/mem.h>
#include <iprt/string.h>
#if defined(RT_ARCH_AMD64)
# include <iprt/x86.h>
#elif defined(RT_ARCH_ARM64)
# include <iprt/armv8.h>
#endif

#include "IEMInline.h"
#include "IEMThreadedFunctions.h"
#include "IEMN8veRecompiler.h"
#include "IEMN8veRecompilerEmit.h"
#include "IEMN8veRecompilerTlbLookup.h"
#include "IEMNativeFunctions.h"


/*
 * Narrow down configs here to avoid wasting time on unused configs.
 * Note! Same checks in IEMAllThrdRecompiler.cpp.
 */

#ifndef IEM_WITH_CODE_TLB
# error The code TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_DATA_TLB
# error The data TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_SETJMP
# error The setjmp approach must be enabled for the recompiler.
#endif



/*********************************************************************************************************************************
*   Code emitters for flushing pending guest register writes and sanity checks                                                   *
*********************************************************************************************************************************/

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
DECL_INLINE_THROW(uint32_t) iemNativePcAdjustCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /* Compare the shadow with the context value, they should match. */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_REG_FIXED_TMP1, IEMNATIVE_REG_FIXED_PC_DBG);
    off = iemNativeEmitAddGprImm(pReNative, off, IEMNATIVE_REG_FIXED_TMP1, pReNative->Core.offPc);
    off = iemNativeEmitGuestRegValueCheck(pReNative, off, IEMNATIVE_REG_FIXED_TMP1, kIemNativeGstReg_Pc);
    return off;
}
# endif
#endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING */

/**
 * Flushes delayed write of a specific guest register.
 *
 * This must be called prior to calling CImpl functions and any helpers that use
 * the guest state (like raising exceptions) and such.
 *
 * This optimization has not yet been implemented.  The first target would be
 * RIP updates, since these are the most common ones.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeRegFlushPendingSpecificWrite(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEGSTREGREF enmClass, uint8_t idxReg)
{
#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    /* If for whatever reason it is possible to reference the PC register at some point we need to do the writeback here first. */
#endif

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    if (   enmClass == kIemNativeGstRegRef_XReg
        && pReNative->Core.bmGstSimdRegShadows & RT_BIT_64(idxReg))
    {
        off = iemNativeSimdRegFlushPendingWrite(pReNative, off, IEMNATIVEGSTSIMDREG_SIMD(idxReg));
        /* Flush the shadows as the register needs to be reloaded (there is no
           guarantee right now that the referenced register doesn't change). */
        uint8_t const idxHstSimdReg = pReNative->Core.aidxGstSimdRegShadows[idxReg];

        iemNativeSimdRegClearGstSimdRegShadowing(pReNative, idxHstSimdReg, off);
        iemNativeSimdRegFlushGuestShadows(pReNative, RT_BIT_64(IEMNATIVEGSTSIMDREG_SIMD(idxReg)));
    }
#endif
    RT_NOREF(pReNative, enmClass, idxReg);
    return off;
}



/*********************************************************************************************************************************
*   Emitters for IEM_MC_BEGIN and IEM_MC_END.                                                                                    *
*********************************************************************************************************************************/

#define IEM_MC_BEGIN(a_cArgs, a_cLocals, a_fMcFlags, a_fCImplFlags) \
    { \
        Assert(pReNative->Core.bmVars     == 0); \
        Assert(pReNative->Core.u64ArgVars == UINT64_MAX); \
        Assert(pReNative->Core.bmStack    == 0); \
        pReNative->fMc    = (a_fMcFlags); \
        pReNative->fCImpl = (a_fCImplFlags); \
        pReNative->cArgs  = ((a_cArgs) + iemNativeArgGetHiddenArgCount(pReNative))

/** We have to get to the end in recompilation mode, as otherwise we won't
 * generate code for all the IEM_MC_IF_XXX branches. */
#define IEM_MC_END() \
        iemNativeVarFreeAll(pReNative); \
    } return off
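/*
 * Illustrative sketch, not part of the build: the generated threaded function
 * bodies bracket every instruction between this macro pair, so a recompiled
 * block roughly expands to the following (the flag values are placeholders,
 * the real ones come from the instruction tables):
 *
 *     IEM_MC_BEGIN(0, 0, IEM_MC_F_MIN_386, 0);
 *     ... IEM_MC statements emitting code for the instruction ...
 *     IEM_MC_END();
 *
 * IEM_MC_BEGIN opens a C scope and asserts that the variable/stack
 * bookkeeping is clean; IEM_MC_END frees all variables, closes the scope and
 * returns the updated code buffer offset.
 */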


/*********************************************************************************************************************************
*   Native Emitter Support.                                                                                                      *
*********************************************************************************************************************************/

#define IEM_MC_NATIVE_IF(a_fSupportedHosts)     if (RT_ARCH_VAL & (a_fSupportedHosts)) {

#define IEM_MC_NATIVE_ELSE()                    } else {

#define IEM_MC_NATIVE_ENDIF()                   } ((void)0)


#define IEM_MC_NATIVE_EMIT_0(a_fnEmitter) \
    off = a_fnEmitter(pReNative, off)

#define IEM_MC_NATIVE_EMIT_1(a_fnEmitter, a0) \
    off = a_fnEmitter(pReNative, off, (a0))

#define IEM_MC_NATIVE_EMIT_2(a_fnEmitter, a0, a1) \
    off = a_fnEmitter(pReNative, off, (a0), (a1))

#define IEM_MC_NATIVE_EMIT_3(a_fnEmitter, a0, a1, a2) \
    off = a_fnEmitter(pReNative, off, (a0), (a1), (a2))

#define IEM_MC_NATIVE_EMIT_4(a_fnEmitter, a0, a1, a2, a3) \
    off = a_fnEmitter(pReNative, off, (a0), (a1), (a2), (a3))

#define IEM_MC_NATIVE_EMIT_5(a_fnEmitter, a0, a1, a2, a3, a4) \
    off = a_fnEmitter(pReNative, off, (a0), (a1), (a2), (a3), (a4))

#define IEM_MC_NATIVE_EMIT_6(a_fnEmitter, a0, a1, a2, a3, a4, a5) \
    off = a_fnEmitter(pReNative, off, (a0), (a1), (a2), (a3), (a4), (a5))

#define IEM_MC_NATIVE_EMIT_7(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6) \
    off = a_fnEmitter(pReNative, off, (a0), (a1), (a2), (a3), (a4), (a5), (a6))

#define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) \
    off = a_fnEmitter(pReNative, off, (a0), (a1), (a2), (a3), (a4), (a5), (a6), (a7))
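/*
 * Illustrative sketch, not part of the build: these macros are meant to be
 * combined so an instruction body can emit a hand-written native sequence on
 * supported hosts while keeping the generic path elsewhere.  The emitter name
 * below is made up for illustration; RT_ARCH_VAL_AMD64 is the kind of host
 * selector value IEM_MC_NATIVE_IF expects:
 *
 *     IEM_MC_NATIVE_IF(RT_ARCH_VAL_AMD64)
 *         IEM_MC_NATIVE_EMIT_2(iemNativeEmitFooAmd64, idxRegDst, idxRegSrc);
 *     IEM_MC_NATIVE_ELSE()
 *         ... generic IEM_MC statements ...
 *     IEM_MC_NATIVE_ENDIF();
 */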


/*********************************************************************************************************************************
*   Emitters for standalone C-implementation deferrals (IEM_MC_DEFER_TO_CIMPL_XXXX)                                              *
*********************************************************************************************************************************/

#define IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
    pReNative->fMc    = 0; \
    pReNative->fCImpl = (a_fFlags); \
    return iemNativeEmitCImplCall0(pReNative, off, pCallEntry->idxInstr, a_fGstShwFlush, (uintptr_t)a_pfnCImpl, a_cbInstr) /** @todo not used ... */


#define IEM_MC_DEFER_TO_CIMPL_1_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
    pReNative->fMc    = 0; \
    pReNative->fCImpl = (a_fFlags); \
    return iemNativeEmitCImplCall1(pReNative, off, pCallEntry->idxInstr, a_fGstShwFlush, (uintptr_t)a_pfnCImpl, a_cbInstr, a0)

DECL_INLINE_THROW(uint32_t) iemNativeEmitCImplCall1(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                                    uint8_t idxInstr, uint64_t a_fGstShwFlush,
                                                    uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0)
{
    return iemNativeEmitCImplCall(pReNative, off, idxInstr, a_fGstShwFlush, pfnCImpl, cbInstr, 1, uArg0, 0, 0);
}


#define IEM_MC_DEFER_TO_CIMPL_2_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
    pReNative->fMc    = 0; \
    pReNative->fCImpl = (a_fFlags); \
    return iemNativeEmitCImplCall2(pReNative, off, pCallEntry->idxInstr, a_fGstShwFlush, \
                                   (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1)

DECL_INLINE_THROW(uint32_t) iemNativeEmitCImplCall2(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                                    uint8_t idxInstr, uint64_t a_fGstShwFlush,
                                                    uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1)
{
    return iemNativeEmitCImplCall(pReNative, off, idxInstr, a_fGstShwFlush, pfnCImpl, cbInstr, 2, uArg0, uArg1, 0);
}


#define IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
    pReNative->fMc    = 0; \
    pReNative->fCImpl = (a_fFlags); \
    return iemNativeEmitCImplCall3(pReNative, off, pCallEntry->idxInstr, a_fGstShwFlush, \
                                   (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1, a2)

DECL_INLINE_THROW(uint32_t) iemNativeEmitCImplCall3(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                                    uint8_t idxInstr, uint64_t a_fGstShwFlush,
                                                    uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1,
                                                    uint64_t uArg2)
{
    return iemNativeEmitCImplCall(pReNative, off, idxInstr, a_fGstShwFlush, pfnCImpl, cbInstr, 3, uArg0, uArg1, uArg2);
}
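/*
 * Note on the pattern above: every IEM_MC_DEFER_TO_CIMPL_N_RET_THREADED
 * variant funnels into the common iemNativeEmitCImplCall() worker, passing
 * its argument count and zero for the unused argument slots.  Illustrative
 * only, the one-argument deferral boils down to:
 *
 *     iemNativeEmitCImplCall(pReNative, off, idxInstr, fGstShwFlush,
 *                            (uintptr_t)pfnCImpl, cbInstr, 1, uArg0, 0, 0);
 */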



/*********************************************************************************************************************************
*   Emitters for advancing PC/RIP/EIP/IP (IEM_MC_ADVANCE_RIP_AND_FINISH_XXX)                                                     *
*********************************************************************************************************************************/

/** Emits the flags check for IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS
 *  and the other _WITH_FLAGS MCs, see iemRegFinishClearingRF. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFinishInstructionFlagsCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /*
     * If it's not just X86_EFL_RF and CPUMCTX_INHIBIT_SHADOW that are set, we
     * return with special status code and make the execution loop deal with
     * this.  If TF or CPUMCTX_DBG_HIT_DRX_MASK triggers, we have to raise an
     * exception and won't continue execution.  While CPUMCTX_DBG_DBGF_MASK
     * could continue w/o interruption, it probably will drop into the
     * debugger, so not worth the effort of trying to service it here and we
     * just lump it in with the handling of the others.
     *
     * To simplify the code and the register state management even more (wrt
     * immediate in AND operation), we always update the flags and skip the
     * extra check and its associated conditional jump.
     */
    AssertCompile(   (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)
                  <= UINT32_MAX);
#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
    AssertMsg(   pReNative->idxCurCall == 0
              || IEMLIVENESS_STATE_IS_INPUT_EXPECTED(iemNativeLivenessGetStateByGstRegEx(&pReNative->paLivenessEntries[pReNative->idxCurCall - 1], IEMLIVENESSBIT_IDX_EFL_OTHER)),
              ("Efl_Other - %u\n", iemNativeLivenessGetStateByGstRegEx(&pReNative->paLivenessEntries[pReNative->idxCurCall - 1], IEMLIVENESSBIT_IDX_EFL_OTHER)));
#endif

    /*
     * As this code can break out of the execution loop when jumping to the
     * ReturnWithFlags label, any pending register writes must be flushed.
     */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                              kIemNativeGstRegUse_ForUpdate, false /*fNoVolatileRegs*/,
                                                              true /*fSkipLivenessAssert*/);
    off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(pReNative, off, idxEflReg,
                                                             X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK,
                                                             iemNativeLabelCreate(pReNative, kIemNativeLabelType_ReturnWithFlags));
    off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxEflReg, ~(uint32_t)(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW));
    off = iemNativeEmitStoreGprToVCpuU32(pReNative, off, idxEflReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.eflags));

    /* Free but don't flush the EFLAGS register. */
    iemNativeRegFreeTmp(pReNative, idxEflReg);

    return off;
}


/** The VINF_SUCCESS dummy. */
template<int const a_rcNormal>
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitFinishInstructionWithStatus(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr)
{
    AssertCompile(a_rcNormal == VINF_SUCCESS || a_rcNormal == VINF_IEM_REEXEC_BREAK);
    if (a_rcNormal != VINF_SUCCESS)
    {
#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#else
        RT_NOREF_PV(idxInstr);
#endif

        /* As this code returns from the TB any pending register writes must be flushed. */
        off = iemNativeRegFlushPendingWrites(pReNative, off);

        return iemNativeEmitJmpToNewLabel(pReNative, off, kIemNativeLabelType_ReturnBreak);
    }
    return off;
}
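/*
 * Note: a_rcNormal is a template parameter, so the status check in
 * iemNativeEmitFinishInstructionWithStatus() is resolved at compile time and
 * the VINF_SUCCESS instantiation emits no code at all.  Illustrative only:
 *
 *     off = iemNativeEmitFinishInstructionWithStatus<VINF_SUCCESS>(pReNative, off, idxInstr);
 *     off = iemNativeEmitFinishInstructionWithStatus<VINF_IEM_REEXEC_BREAK>(pReNative, off, idxInstr);
 *
 * The first call is a no-op; the second emits a register flush followed by a
 * jump to the ReturnBreak label.
 */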


#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(a_cbInstr, a_rcNormal) \
    off = iemNativeEmitAddToRip64AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbInstr, a_rcNormal) \
    off = iemNativeEmitAddToRip64AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

/** Same as iemRegAddToRip64AndFinishingNoFlags. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitAddToRip64AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
{
#if !defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) || defined(IEMNATIVE_REG_FIXED_PC_DBG)
# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
    if (!pReNative->Core.offPc)
        off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
# endif

    /* Allocate a temporary PC register. */
    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);

    /* Perform the addition and store the result. */
    off = iemNativeEmitAddGprImm8(pReNative, off, idxPcReg, cbInstr);
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));

    /* Free but don't flush the PC register. */
    iemNativeRegFreeTmp(pReNative, idxPcReg);
#endif

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);

    pReNative->Core.offPc += cbInstr;
# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
    off = iemNativePcAdjustCheck(pReNative, off);
# endif
    if (pReNative->cCondDepth)
        off = iemNativeEmitPcWriteback(pReNative, off);
    else
        pReNative->Core.cInstrPcUpdateSkipped++;
#endif

    return off;
}
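/*
 * Configuration note (summarizing the code above): with
 * IEMNATIVE_WITH_DELAYED_PC_UPDATING the emitter usually stores nothing and
 * merely accumulates the instruction length in pReNative->Core.offPc,
 * counting the skipped update; the actual CPUMCTX write happens later via
 * iemNativeEmitPcWriteback() (immediately when inside a conditional, i.e.
 * cCondDepth != 0).  Without that config the RIP is loaded, advanced by
 * cbInstr and stored on every instruction.  The EIP/IP variants below follow
 * the same pattern.
 */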


#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(a_cbInstr, a_rcNormal) \
    off = iemNativeEmitAddToEip32AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbInstr, a_rcNormal) \
    off = iemNativeEmitAddToEip32AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

/** Same as iemRegAddToEip32AndFinishingNoFlags. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitAddToEip32AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
{
#if !defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) || defined(IEMNATIVE_REG_FIXED_PC_DBG)
# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
    if (!pReNative->Core.offPc)
        off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
# endif

    /* Allocate a temporary PC register. */
    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);

    /* Perform the addition and store the result. */
    off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxPcReg, cbInstr);
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));

    /* Free but don't flush the PC register. */
    iemNativeRegFreeTmp(pReNative, idxPcReg);
#endif

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);

    pReNative->Core.offPc += cbInstr;
# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
    off = iemNativePcAdjustCheck(pReNative, off);
# endif
    if (pReNative->cCondDepth)
        off = iemNativeEmitPcWriteback(pReNative, off);
    else
        pReNative->Core.cInstrPcUpdateSkipped++;
#endif

    return off;
}


#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr, a_rcNormal) \
    off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbInstr, a_rcNormal) \
    off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

/** Same as iemRegAddToIp16AndFinishingNoFlags. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitAddToIp16AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
{
#if !defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) || defined(IEMNATIVE_REG_FIXED_PC_DBG)
# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
    if (!pReNative->Core.offPc)
        off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
# endif

    /* Allocate a temporary PC register. */
    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);

    /* Perform the addition and store the result. */
    off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxPcReg, cbInstr);
    off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));

    /* Free but don't flush the PC register. */
    iemNativeRegFreeTmp(pReNative, idxPcReg);
#endif

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);

    pReNative->Core.offPc += cbInstr;
# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
    off = iemNativePcAdjustCheck(pReNative, off);
# endif
    if (pReNative->cCondDepth)
        off = iemNativeEmitPcWriteback(pReNative, off);
    else
        pReNative->Core.cInstrPcUpdateSkipped++;
#endif

    return off;
}



/*********************************************************************************************************************************
*   Emitters for changing PC/RIP/EIP/IP with a relative jump (IEM_MC_REL_JMP_XXX_AND_FINISH_XXX).                                *
*********************************************************************************************************************************/

#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
                                                            (a_enmEffOpSize), pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
                                                            (a_enmEffOpSize), pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
                                                            IEMMODE_16BIT, pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
                                                            IEMMODE_16BIT, pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \
                                                            IEMMODE_64BIT, pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \
                                                            IEMMODE_64BIT, pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

/** Same as iemRegRip64RelativeJumpS8AndFinishNoFlags,
 *  iemRegRip64RelativeJumpS16AndFinishNoFlags and
 *  iemRegRip64RelativeJumpS32AndFinishNoFlags. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr,
                                                  int32_t offDisp, IEMMODE enmEffOpSize, uint8_t idxInstr)
{
    Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);

    /* We speculatively modify PC and may raise #GP(0), so make sure the right values are in CPUMCTX. */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    Assert(pReNative->Core.offPc == 0);

    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
#endif

    /* Allocate a temporary PC register. */
    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);

    /* Perform the addition. */
    off = iemNativeEmitAddGprImm(pReNative, off, idxPcReg, (int64_t)offDisp + cbInstr);

    if (RT_LIKELY(enmEffOpSize == IEMMODE_64BIT))
    {
        /* Check that the address is canonical, raising #GP(0) + exit TB if it isn't. */
        off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
    }
    else
    {
        /* Just truncate the result to 16-bit IP. */
        Assert(enmEffOpSize == IEMMODE_16BIT);
        off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
    }
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));

    /* Free but don't flush the PC register. */
    iemNativeRegFreeTmp(pReNative, idxPcReg);

    return off;
}
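/*
 * Background note (x86 fact, added for clarity): a 64-bit code address is
 * canonical when bits 63 through 47 are all identical, i.e. the sign
 * extension of bit 47, so for 48-bit implementations
 * uAddr == (uint64_t)((int64_t)(uAddr << 16) >> 16).  Branching to a
 * non-canonical RIP must raise #GP(0), which is why the 64-bit path above
 * emits iemNativeEmitCheckGprCanonicalMaybeRaiseGp0() right after the
 * displacement is added.
 */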


#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
                                                            (a_enmEffOpSize), pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i8, a_cbInstr, a_enmEffOpSize, a_rcNormal) \
    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
                                                            (a_enmEffOpSize), pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
                                                            IEMMODE_16BIT, pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
                                                            IEMMODE_16BIT, pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \
                                                            IEMMODE_32BIT, pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \
                                                            IEMMODE_32BIT, pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

/** Same as iemRegEip32RelativeJumpS8AndFinishNoFlags,
 *  iemRegEip32RelativeJumpS16AndFinishNoFlags and
 *  iemRegEip32RelativeJumpS32AndFinishNoFlags. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr,
                                                  int32_t offDisp, IEMMODE enmEffOpSize, uint8_t idxInstr)
{
    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);

    /* We speculatively modify PC and may raise #GP(0), so make sure the right values are in CPUMCTX. */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    Assert(pReNative->Core.offPc == 0);

    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
#endif

    /* Allocate a temporary PC register. */
    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);

    /* Perform the addition. */
    off = iemNativeEmitAddGpr32Imm(pReNative, off, idxPcReg, offDisp + cbInstr);

    /* Truncate the result to 16-bit IP if the operand size is 16-bit. */
    if (enmEffOpSize == IEMMODE_16BIT)
        off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);

    /* Perform limit checking, potentially raising #GP(0) and exiting the TB. */
    /** @todo we can skip this in 32-bit FLAT mode. */
    off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);

    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));

    /* Free but don't flush the PC register. */
    iemNativeRegFreeTmp(pReNative, idxPcReg);

    return off;
}


#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i8, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i32, a_cbInstr, a_rcNormal) \
    off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), pCallEntry->idxInstr); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal>(pReNative, off, pCallEntry->idxInstr)

/** Same as iemRegIp16RelativeJumpS8AndFinishNoFlags. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                                 uint8_t cbInstr, int32_t offDisp, uint8_t idxInstr)
{
    /* We speculatively modify PC and may raise #GP(0), so make sure the right values are in CPUMCTX. */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    Assert(pReNative->Core.offPc == 0);

    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
#endif

    /* Allocate a temporary PC register. */
    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);

    /* Perform the addition, clamp the result, check limit (may #GP(0) + exit TB) and store the result. */
    off = iemNativeEmitAddGpr32Imm(pReNative, off, idxPcReg, offDisp + cbInstr);
    off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
    off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));

    /* Free but don't flush the PC register. */
    iemNativeRegFreeTmp(pReNative, idxPcReg);

    return off;
}



/*********************************************************************************************************************************
*   Emitters for changing PC/RIP/EIP/IP with an indirect jump (IEM_MC_SET_RIP_UXX_AND_FINISH).                                   *
*********************************************************************************************************************************/

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for pre-386 targets. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP) \
    off = iemNativeEmitRipJumpNoFlags(pReNative, off, (a_u16NewIP), false /*f64Bit*/, pCallEntry->idxInstr, sizeof(uint16_t))

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for 386+ targets. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP) \
    off = iemNativeEmitRipJumpNoFlags(pReNative, off, (a_u16NewIP), false /*f64Bit*/, pCallEntry->idxInstr, sizeof(uint16_t))

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for use in 64-bit code. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP) \
    off = iemNativeEmitRipJumpNoFlags(pReNative, off, (a_u16NewIP), true /*f64Bit*/, pCallEntry->idxInstr, sizeof(uint16_t))

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for pre-386 targets that checks and
 *  clears flags. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP) \
    IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off)

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for 386+ targets that checks and
 *  clears flags. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP) \
    IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off)

/** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for use in 64-bit code that checks
 *  and clears flags. */
#define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP) \
    IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off)

#undef IEM_MC_SET_RIP_U16_AND_FINISH


/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for 386+ targets. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP) \
    off = iemNativeEmitRipJumpNoFlags(pReNative, off, (a_u32NewEIP), false /*f64Bit*/, pCallEntry->idxInstr, sizeof(uint32_t))

/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for use in 64-bit code. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP) \
    off = iemNativeEmitRipJumpNoFlags(pReNative, off, (a_u32NewEIP), true /*f64Bit*/, pCallEntry->idxInstr, sizeof(uint32_t))

/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for 386+ targets that checks and
 *  clears flags. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP) \
    IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off)

/** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for use in 64-bit code that checks
 *  and clears flags. */
#define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) \
    IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off)

#undef IEM_MC_SET_RIP_U32_AND_FINISH


/** Variant of IEM_MC_SET_RIP_U64_AND_FINISH for use in 64-bit code. */
#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u64NewEIP) \
    off = iemNativeEmitRipJumpNoFlags(pReNative, off, (a_u64NewEIP), true /*f64Bit*/, pCallEntry->idxInstr, sizeof(uint64_t))

/** Variant of IEM_MC_SET_RIP_U64_AND_FINISH for use in 64-bit code that checks
 *  and clears flags. */
#define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u64NewEIP) \
    IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u64NewEIP); \
    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off)

#undef IEM_MC_SET_RIP_U64_AND_FINISH


/** Same as iemRegRipJumpU16AndFinishNoFlags,
 *  iemRegRipJumpU32AndFinishNoFlags and iemRegRipJumpU64AndFinishNoFlags. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitRipJumpNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarPc, bool f64Bit,
                            uint8_t idxInstr, uint8_t cbVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarPc);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarPc, cbVar);

    /* We speculatively modify PC and may raise #GP(0), so make sure the right values are in CPUMCTX. */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    Assert(pReNative->Core.offPc == 0);

    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
#endif

    /* Get a register with the new PC loaded from idxVarPc.
       Note! This ASSUMES that the high bits of the GPR are zeroed. */
    uint8_t const idxPcReg = iemNativeVarRegisterAcquireForGuestReg(pReNative, idxVarPc, kIemNativeGstReg_Pc, &off);

    /* Check limit (may #GP(0) + exit TB). */
    if (!f64Bit)
        /** @todo we can skip this test in FLAT 32-bit mode. */
        off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
    /* Check that the address is canonical, raising #GP(0) + exit TB if it isn't. */
    else if (cbVar > sizeof(uint32_t))
        off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);

    /* Store the result. */
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));

    iemNativeVarRegisterRelease(pReNative, idxVarPc);
    /** @todo implicitly free the variable? */

    return off;
}
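/*
 * Note (summarizing the dispatch above): iemNativeEmitRipJumpNoFlags() serves
 * all three IEM_MC_SET_RIP_UXX widths via the f64Bit/cbVar pair.  16-bit and
 * 32-bit targets get the CS limit check, while only a full 64-bit target
 * value needs the canonical-address check; a zero-extended 16-bit or 32-bit
 * value can never be non-canonical.
 */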



/*********************************************************************************************************************************
*   Emitters for raising exceptions (IEM_MC_MAYBE_RAISE_XXX)                                                                     *
*********************************************************************************************************************************/

#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
    off = iemNativeEmitMaybeRaiseDeviceNotAvailable(pReNative, off, pCallEntry->idxInstr)

/**
 * Emits code to check if a \#NM exception should be raised.
 *
 * @returns New code buffer offset, UINT32_MAX on failure.
 * @param   pReNative   The native recompile state.
 * @param   off         The code buffer offset.
 * @param   idxInstr    The current instruction.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitMaybeRaiseDeviceNotAvailable(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr)
{
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential);

    if (!(pReNative->fSimdRaiseXcptChecksEmitted & IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_DEVICE_NOT_AVAILABLE))
    {
#endif
        /*
         * Make sure we don't have any outstanding guest register writes as we may
         * raise an #NM and all guest registers must be up to date in CPUMCTX.
         *
         * @todo r=aeichner Can we postpone this to the RaiseNm path?
         */
        off = iemNativeRegFlushPendingWrites(pReNative, off);

#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#else
        RT_NOREF(idxInstr);
#endif

        /* Allocate a temporary CR0 register. */
        uint8_t const idxCr0Reg       = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Cr0, kIemNativeGstRegUse_ReadOnly);
        uint8_t const idxLabelRaiseNm = iemNativeLabelCreate(pReNative, kIemNativeLabelType_RaiseNm);

        /*
         * if ((cr0 & (X86_CR0_EM | X86_CR0_TS)) != 0)
         *     return raisexcpt();
         */
        /* Test and jump. */
        off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(pReNative, off, idxCr0Reg, X86_CR0_EM | X86_CR0_TS, idxLabelRaiseNm);

        /* Free but don't flush the CR0 register. */
        iemNativeRegFreeTmp(pReNative, idxCr0Reg);

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
        pReNative->fSimdRaiseXcptChecksEmitted |= IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_DEVICE_NOT_AVAILABLE;
    }
    else
        STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted);
#endif

    return off;
}
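/*
 * Background note (x86 fact, added for clarity): #NM is the device-not-available
 * exception used for lazy FPU state switching.  For x87 instructions both
 * CR0.EM = 1 (emulate the FPU) and CR0.TS = 1 (task switched, FPU state not
 * owned by the current context) route to #NM, which is exactly the
 * (CR0 & (EM | TS)) test emitted above.
 */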
854 |
|
---|
855 |
|
---|
856 | #define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
|
---|
857 | off = iemNativeEmitMaybeFpuException(pReNative, off, pCallEntry->idxInstr)
|
---|
858 |
|
---|
859 | /**
|
---|
860 | * Emits code to check if a \#MF exception should be raised.
|
---|
861 | *
|
---|
862 | * @returns New code buffer offset, UINT32_MAX on failure.
|
---|
863 | * @param pReNative The native recompile state.
|
---|
864 | * @param off The code buffer offset.
|
---|
865 | * @param idxInstr The current instruction.
|
---|
866 | */
|
---|
867 | DECL_INLINE_THROW(uint32_t)
|
---|
868 | iemNativeEmitMaybeRaiseFpuException(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr)
|
---|
869 | {
|
---|
870 | /*
|
---|
871 | * Make sure we don't have any outstanding guest register writes as we may
|
---|
872 | * raise an #MF and all guest register must be up to date in CPUMCTX.
|
---|
873 | *
|
---|
874 | * @todo r=aeichner Can we postpone this to the RaiseMf path?
|
---|
875 | */
|
---|
876 | off = iemNativeRegFlushPendingWrites(pReNative, off);
|
---|
877 |
|
---|
878 | #ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
|
---|
879 | off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
|
---|
880 | #else
|
---|
881 | RT_NOREF(idxInstr);
|
---|
882 | #endif
|
---|
883 |
|
---|
884 | /* Allocate a temporary FSW register. */
|
---|
885 | uint8_t const idxFpuFswReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_FpuFsw, kIemNativeGstRegUse_ReadOnly);
|
---|
886 | uint8_t const idxLabelRaiseMf = iemNativeLabelCreate(pReNative, kIemNativeLabelType_RaiseMf);
|
---|
887 |
|
---|
888 | /*
|
---|
889 | * if (FSW & X86_FSW_ES != 0)
|
---|
890 | * return raisexcpt();
|
---|
891 | */
|
---|
892 | /* Test and jump. */
|
---|
893 | off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(pReNative, off, idxFpuFswReg, X86_FSW_ES, idxLabelRaiseMf);
|
---|
894 |
|
---|
895 | /* Free but don't flush the FSW register. */
|
---|
896 | iemNativeRegFreeTmp(pReNative, idxFpuFswReg);
|
---|
897 |
|
---|
898 | return off;
|
---|
899 | }
|
---|
900 |
|
---|
901 |
|
---|
902 | #define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
|
---|
903 | off = iemNativeEmitMaybeRaiseSseRelatedXcpt(pReNative, off, pCallEntry->idxInstr)
|
---|
904 |
|
---|
905 | /**
|
---|
906 | * Emits code to check if a SSE exception (either \#UD or \#NM) should be raised.
|
---|
907 | *
|
---|
908 | * @returns New code buffer offset, UINT32_MAX on failure.
|
---|
909 | * @param pReNative The native recompile state.
|
---|
910 | * @param off The code buffer offset.
|
---|
911 | * @param idxInstr The current instruction.
|
---|
912 | */
|
---|
913 | DECL_INLINE_THROW(uint32_t)
|
---|
914 | iemNativeEmitMaybeRaiseSseRelatedXcpt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr)
|
---|
915 | {
|
---|
916 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
917 | STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential);
|
---|
918 |
|
---|
919 | if (!(pReNative->fSimdRaiseXcptChecksEmitted & IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_SSE))
|
---|
920 | {
|
---|
921 | #endif
|
---|
922 | /*
|
---|
923 | * Make sure we don't have any outstanding guest register writes as we may
|
---|
924 | * raise an \#UD or \#NM and all guest register must be up to date in CPUMCTX.
|
---|
925 | *
|
---|
926 | * @todo r=aeichner Can we postpone this to the RaiseNm/RaiseUd path?
|
---|
927 | */
|
---|
928 | off = iemNativeRegFlushPendingWrites(pReNative, off, false /*fFlushShadows*/);
|
---|
929 |
|
---|
930 | #ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
|
---|
931 | off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
|
---|
932 | #else
|
---|
933 | RT_NOREF(idxInstr);
|
---|
934 | #endif
|
---|
935 |
|
---|
936 | /* Allocate a temporary CR0 and CR4 register. */
|
---|
937 | uint8_t const idxCr0Reg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Cr0, kIemNativeGstRegUse_ReadOnly);
|
---|
938 | uint8_t const idxCr4Reg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Cr4, kIemNativeGstRegUse_ReadOnly);
|
---|
939 | uint8_t const idxLabelRaiseNm = iemNativeLabelCreate(pReNative, kIemNativeLabelType_RaiseNm);
|
---|
940 | uint8_t const idxLabelRaiseUd = iemNativeLabelCreate(pReNative, kIemNativeLabelType_RaiseUd);
|
---|
941 |
|
---|
942 | /** @todo r=aeichner Optimize this more later to have less compares and branches,
|
---|
943 | * (see IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() in IEMMc.h but check that it has some
|
---|
944 | * actual performance benefit first). */
|
---|
945 | /*
|
---|
946 | * if (cr0 & X86_CR0_EM)
|
---|
947 | * return raisexcpt();
|
---|
948 | */
|
---|
949 | off = iemNativeEmitTestBitInGprAndJmpToLabelIfSet(pReNative, off, idxCr0Reg, X86_CR0_EM_BIT, idxLabelRaiseUd);
|
---|
950 | /*
|
---|
951 | * if (!(cr4 & X86_CR4_OSFXSR))
|
---|
952 | * return raisexcpt();
|
---|
953 | */
|
---|
954 | off = iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet(pReNative, off, idxCr4Reg, X86_CR4_OSFXSR_BIT, idxLabelRaiseUd);
|
---|
955 | /*
|
---|
956 | * if (cr0 & X86_CR0_TS)
|
---|
957 | * return raisexcpt();
|
---|
958 | */
|
---|
959 | off = iemNativeEmitTestBitInGprAndJmpToLabelIfSet(pReNative, off, idxCr0Reg, X86_CR0_TS_BIT, idxLabelRaiseNm);
|
---|
960 |
|
---|
961 | /* Free but don't flush the CR0 and CR4 register. */
|
---|
962 | iemNativeRegFreeTmp(pReNative, idxCr0Reg);
|
---|
963 | iemNativeRegFreeTmp(pReNative, idxCr4Reg);
|
---|
964 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
965 | pReNative->fSimdRaiseXcptChecksEmitted |= IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_SSE;
|
---|
966 | }
|
---|
967 | else
|
---|
968 | STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted);
|
---|
969 | #endif
|
---|
970 |
|
---|
971 | return off;
|
---|
972 | }
|
---|


#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
    off = iemNativeEmitMaybeRaiseAvxRelatedXcpt(pReNative, off, pCallEntry->idxInstr)

/**
 * Emits code to check if an AVX exception (either \#UD or \#NM) should be raised.
 *
 * @returns New code buffer offset, UINT32_MAX on failure.
 * @param   pReNative   The native recompile state.
 * @param   off         The code buffer offset.
 * @param   idxInstr    The current instruction.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitMaybeRaiseAvxRelatedXcpt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr)
{
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential);

    if (!(pReNative->fSimdRaiseXcptChecksEmitted & IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_AVX))
    {
#endif
        /*
         * Make sure we don't have any outstanding guest register writes as we may
         * raise an \#UD or \#NM and all guest registers must be up to date in CPUMCTX.
         *
         * @todo r=aeichner Can we postpone this to the RaiseNm/RaiseUd path?
         */
        off = iemNativeRegFlushPendingWrites(pReNative, off, false /*fFlushShadows*/);

#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#else
        RT_NOREF(idxInstr);
#endif

        /* Allocate a temporary CR0, CR4 and XCR0 register. */
        uint8_t const idxCr0Reg       = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Cr0, kIemNativeGstRegUse_ReadOnly);
        uint8_t const idxCr4Reg       = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Cr4, kIemNativeGstRegUse_ReadOnly);
        uint8_t const idxXcr0Reg      = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Xcr0, kIemNativeGstRegUse_ReadOnly);
        uint8_t const idxLabelRaiseNm = iemNativeLabelCreate(pReNative, kIemNativeLabelType_RaiseNm);
        uint8_t const idxLabelRaiseUd = iemNativeLabelCreate(pReNative, kIemNativeLabelType_RaiseUd);

        /** @todo r=aeichner Optimize this more later to have less compares and branches,
         * (see IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() in IEMMc.h but check that it has some
         * actual performance benefit first). */
        /*
         * if ((xcr0 & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE))
         *     return raisexcpt();
         */
        const uint8_t idxRegTmp = iemNativeRegAllocTmpImm(pReNative, &off, XSAVE_C_YMM | XSAVE_C_SSE);
        off = iemNativeEmitAndGprByGpr(pReNative, off, idxRegTmp, idxXcr0Reg);
        off = iemNativeEmitTestIfGprNotEqualImmAndJmpToLabel(pReNative, off, idxRegTmp, XSAVE_C_YMM | XSAVE_C_SSE, idxLabelRaiseUd);
        iemNativeRegFreeTmp(pReNative, idxRegTmp);

        /*
         * if (!(cr4 & X86_CR4_OSXSAVE))
         *     return raisexcpt();
         */
        off = iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet(pReNative, off, idxCr4Reg, X86_CR4_OSXSAVE_BIT, idxLabelRaiseUd);
        /*
         * if (cr0 & X86_CR0_TS)
         *     return raisexcpt();
         */
        off = iemNativeEmitTestBitInGprAndJmpToLabelIfSet(pReNative, off, idxCr0Reg, X86_CR0_TS_BIT, idxLabelRaiseNm);

        /* Free but don't flush the CR0, CR4 and XCR0 registers. */
        iemNativeRegFreeTmp(pReNative, idxCr0Reg);
        iemNativeRegFreeTmp(pReNative, idxCr4Reg);
        iemNativeRegFreeTmp(pReNative, idxXcr0Reg);
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
        pReNative->fSimdRaiseXcptChecksEmitted |= IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_AVX;
    }
    else
        STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted);
#endif

    return off;
}
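
/* For orientation, the AVX variant boils down to this guest-visible logic (a
   simplified sketch; see IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() in IEMMc.h for
   the authoritative version):

        if (   (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE)
            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
            return iemRaiseUndefinedOpcode(pVCpu);          // -> #UD
        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
            return iemRaiseDeviceNotAvailable(pVCpu);       // -> #NM
*/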


#define IEM_MC_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT() \
    off = iemNativeEmitRaiseSseAvxSimdFpXcpt(pReNative, off, pCallEntry->idxInstr)

/**
 * Emits code to raise a SIMD floating point exception (\#XF if CR4.OSXMMEEXCPT
 * is set, \#UD otherwise).
 *
 * @returns New code buffer offset, UINT32_MAX on failure.
 * @param   pReNative   The native recompile state.
 * @param   off         The code buffer offset.
 * @param   idxInstr    The current instruction.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitRaiseSseAvxSimdFpXcpt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr)
{
    /*
     * Make sure we don't have any outstanding guest register writes as we may
     * raise an \#UD or \#XF and all guest registers must be up to date in CPUMCTX.
     *
     * @todo r=aeichner Can we postpone this to the RaiseXf/RaiseUd path?
     */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#else
    RT_NOREF(idxInstr);
#endif

    /* Allocate a temporary CR4 register. */
    uint8_t const idxCr4Reg       = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Cr4, kIemNativeGstRegUse_ReadOnly);
    uint8_t const idxLabelRaiseXf = iemNativeLabelCreate(pReNative, kIemNativeLabelType_RaiseXf);
    uint8_t const idxLabelRaiseUd = iemNativeLabelCreate(pReNative, kIemNativeLabelType_RaiseUd);

    /*
     * if (!(cr4 & X86_CR4_OSXMMEEXCPT))
     *     return raisexcpt();  -> \#UD, as \#XF is unavailable without OS support.
     */
    off = iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet(pReNative, off, idxCr4Reg, X86_CR4_OSXMMEEXCPT_BIT, idxLabelRaiseUd);

    /* Otherwise raise the \#XF exception unconditionally. */
    off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelRaiseXf);

    /* Free but don't flush the CR4 register. */
    iemNativeRegFreeTmp(pReNative, idxCr4Reg);

    return off;
}
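
/* In plain C the above corresponds to (a sketch; the raise helper names are
   assumed to be the usual IEM ones and the decision mirrors the IEMMc.h macro):

        if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT)
            return iemRaiseSimdFpException(pVCpu);      // -> #XF, OS handles SIMD FP exceptions
        return iemRaiseUndefinedOpcode(pVCpu);          // -> #UD otherwise
*/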


#define IEM_MC_RAISE_DIVIDE_ERROR() \
    off = iemNativeEmitRaiseDivideError(pReNative, off, pCallEntry->idxInstr)

/**
 * Emits code to raise a \#DE.
 *
 * @returns New code buffer offset, UINT32_MAX on failure.
 * @param   pReNative   The native recompile state.
 * @param   off         The code buffer offset.
 * @param   idxInstr    The current instruction.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitRaiseDivideError(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr)
{
    /*
     * Make sure we don't have any outstanding guest register writes as we
     * raise a \#DE and all guest registers must be up to date in CPUMCTX.
     */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#else
    RT_NOREF(idxInstr);
#endif

    uint8_t const idxLabelRaiseDe = iemNativeLabelCreate(pReNative, kIemNativeLabelType_RaiseDe);

    /* Raise the \#DE exception unconditionally. */
    off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelRaiseDe);

    return off;
}


/*********************************************************************************************************************************
*   Emitters for conditionals (IEM_MC_IF_XXX, IEM_MC_ELSE, IEM_MC_ENDIF)                                                        *
*********************************************************************************************************************************/

/**
 * Pushes an IEM_MC_IF_XXX onto the condition stack.
 *
 * @returns Pointer to the condition stack entry on success, NULL on failure
 *          (too many nestings).
 */
DECL_INLINE_THROW(PIEMNATIVECOND) iemNativeCondPushIf(PIEMRECOMPILERSTATE pReNative, uint32_t *poff)
{
#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    *poff = iemNativeRegFlushPendingWrites(pReNative, *poff);
#endif

    uint32_t const idxStack = pReNative->cCondDepth;
    AssertStmt(idxStack < RT_ELEMENTS(pReNative->aCondStack), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_COND_TOO_DEEPLY_NESTED));

    PIEMNATIVECOND const pEntry = &pReNative->aCondStack[idxStack];
    pReNative->cCondDepth = (uint8_t)(idxStack + 1);

    uint16_t const uCondSeqNo = ++pReNative->uCondSeqNo;
    pEntry->fInElse       = false;
    pEntry->idxLabelElse  = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Else, UINT32_MAX /*offWhere*/, uCondSeqNo);
    pEntry->idxLabelEndIf = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Endif, UINT32_MAX /*offWhere*/, uCondSeqNo);

    return pEntry;
}


/**
 * Start of the if-block, snapshotting the register and variable state.
 */
DECL_INLINE_THROW(void)
iemNativeCondStartIfBlock(PIEMRECOMPILERSTATE pReNative, uint32_t offIfBlock, uint32_t idxLabelIf = UINT32_MAX)
{
    Assert(offIfBlock != UINT32_MAX);
    Assert(pReNative->cCondDepth > 0 && pReNative->cCondDepth <= RT_ELEMENTS(pReNative->aCondStack));
    PIEMNATIVECOND const pEntry = &pReNative->aCondStack[pReNative->cCondDepth - 1];
    Assert(!pEntry->fInElse);

    /* Define the start of the IF block if requested or for disassembly purposes. */
    if (idxLabelIf != UINT32_MAX)
        iemNativeLabelDefine(pReNative, idxLabelIf, offIfBlock);
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    else
        iemNativeLabelCreate(pReNative, kIemNativeLabelType_If, offIfBlock, pReNative->paLabels[pEntry->idxLabelElse].uData);
#else
    RT_NOREF(offIfBlock);
#endif

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    Assert(pReNative->Core.offPc == 0);
#endif

    /* Copy the initial state so we can restore it in the 'else' block. */
    pEntry->InitialState = pReNative->Core;
}


#define IEM_MC_ELSE() } while (0); \
        off = iemNativeEmitElse(pReNative, off); \
        do {

/** Emits code related to IEM_MC_ELSE. */
DECL_INLINE_THROW(uint32_t) iemNativeEmitElse(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /* Check sanity and get the conditional stack entry. */
    Assert(off != UINT32_MAX);
    Assert(pReNative->cCondDepth > 0 && pReNative->cCondDepth <= RT_ELEMENTS(pReNative->aCondStack));
    PIEMNATIVECOND const pEntry = &pReNative->aCondStack[pReNative->cCondDepth - 1];
    Assert(!pEntry->fInElse);

    /* Jump to the endif. */
    off = iemNativeEmitJmpToLabel(pReNative, off, pEntry->idxLabelEndIf);

    /* Define the else label and enter the else part of the condition. */
    iemNativeLabelDefine(pReNative, pEntry->idxLabelElse, off);
    pEntry->fInElse = true;

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    Assert(pReNative->Core.offPc == 0);
#endif

    /* Snapshot the core state so we can do a merge at the endif and restore
       the snapshot we took at the start of the if-block. */
    pEntry->IfFinalState = pReNative->Core;
    pReNative->Core      = pEntry->InitialState;

    return off;
}


#define IEM_MC_ENDIF() } while (0); \
        off = iemNativeEmitEndIf(pReNative, off)

/** Emits code related to IEM_MC_ENDIF. */
DECL_INLINE_THROW(uint32_t) iemNativeEmitEndIf(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /* Check sanity and get the conditional stack entry. */
    Assert(off != UINT32_MAX);
    Assert(pReNative->cCondDepth > 0 && pReNative->cCondDepth <= RT_ELEMENTS(pReNative->aCondStack));
    PIEMNATIVECOND const pEntry = &pReNative->aCondStack[pReNative->cCondDepth - 1];

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    Assert(pReNative->Core.offPc == 0);
#endif

    /*
     * Now we have to find common ground between the current core state and the
     * one at the end of the other branch.  Use the smallest common denominator
     * and just drop anything that isn't the same in both states.
     */
    /** @todo We could, maybe, shuffle registers around if we thought it helpful,
     *        which is why we're doing this at the end of the else-block.
     *        But we'd need more info about the future for that to be worth the effort. */
    PCIEMNATIVECORESTATE const pOther = pEntry->fInElse ? &pEntry->IfFinalState : &pEntry->InitialState;
    if (memcmp(&pReNative->Core, pOther, sizeof(*pOther)) != 0)
    {
        /* Shadowed guest registers first. */
        uint64_t fGstRegs = pReNative->Core.bmGstRegShadows;
        if (fGstRegs)
        {
            Assert(pReNative->Core.bmHstRegsWithGstShadow != 0);
            do
            {
                unsigned idxGstReg = ASMBitFirstSetU64(fGstRegs) - 1;
                fGstRegs &= ~RT_BIT_64(idxGstReg);

                uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
                if (   !(pOther->bmGstRegShadows & RT_BIT_64(idxGstReg))
                    || idxHstReg != pOther->aidxGstRegShadows[idxGstReg])
                {
                    Log12(("iemNativeEmitEndIf: dropping gst %s from hst %s\n",
                           g_aGstShadowInfo[idxGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg]));
                    iemNativeRegClearGstRegShadowing(pReNative, idxHstReg, off);
                }
            } while (fGstRegs);
        }
        else
            Assert(pReNative->Core.bmHstRegsWithGstShadow == 0);

        /* Check variables next.  For now we must require them to be identical
           or stuff we can recreate. */
        Assert(pReNative->Core.u64ArgVars == pOther->u64ArgVars);
        uint32_t fVars = pReNative->Core.bmVars | pOther->bmVars;
        if (fVars)
        {
            uint32_t const fVarsMustRemove = pReNative->Core.bmVars ^ pOther->bmVars;
            do
            {
                unsigned idxVar = ASMBitFirstSetU32(fVars) - 1;
                fVars &= ~RT_BIT_32(idxVar);

                if (!(fVarsMustRemove & RT_BIT_32(idxVar)))
                {
                    if (pReNative->Core.aVars[idxVar].idxReg == pOther->aVars[idxVar].idxReg)
                        continue;
                    if (pReNative->Core.aVars[idxVar].enmKind != kIemNativeVarKind_Stack)
                    {
                        uint8_t const idxHstReg = pReNative->Core.aVars[idxVar].idxReg;
                        if (idxHstReg != UINT8_MAX)
                        {
                            pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
                            pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
                            Log12(("iemNativeEmitEndIf: Dropping hst reg %s for var #%u/%#x\n",
                                   g_apszIemNativeHstRegNames[idxHstReg], idxVar, IEMNATIVE_VAR_IDX_PACK(idxVar)));
                        }
                        continue;
                    }
                }
                else if (!(pReNative->Core.bmVars & RT_BIT_32(idxVar)))
                    continue;

                /* Irreconcilable, so drop it. */
                uint8_t const idxHstReg = pReNative->Core.aVars[idxVar].idxReg;
                if (idxHstReg != UINT8_MAX)
                {
                    pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
                    pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
                    Log12(("iemNativeEmitEndIf: Dropping hst reg %s for var #%u/%#x (also dropped)\n",
                           g_apszIemNativeHstRegNames[idxHstReg], idxVar, IEMNATIVE_VAR_IDX_PACK(idxVar)));
                }
                Log11(("iemNativeEmitEndIf: Freeing variable #%u/%#x\n", idxVar, IEMNATIVE_VAR_IDX_PACK(idxVar)));
                pReNative->Core.bmVars &= ~RT_BIT_32(idxVar);
            } while (fVars);
        }

        /* Finally, check that the host register allocations match. */
        AssertMsgStmt(pReNative->Core.bmHstRegs == pOther->bmHstRegs,
                      ("Core.bmHstRegs=%#x pOther->bmHstRegs=%#x - %#x\n",
                       pReNative->Core.bmHstRegs, pOther->bmHstRegs, pReNative->Core.bmHstRegs ^ pOther->bmHstRegs),
                      IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_COND_ENDIF_RECONCILIATION_FAILED));
    }

    /*
     * Define the endif label and maybe the else one if we're still in the 'if' part.
     */
    if (!pEntry->fInElse)
        iemNativeLabelDefine(pReNative, pEntry->idxLabelElse, off);
    else
        Assert(pReNative->paLabels[pEntry->idxLabelElse].off <= off);
    iemNativeLabelDefine(pReNative, pEntry->idxLabelEndIf, off);

    /* Pop the conditional stack. */
    pReNative->cCondDepth -= 1;

    return off;
}
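
/* To see how these pieces fit together: a microcode statement sequence like

        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            ...
        } IEM_MC_ELSE() {
            ...
        } IEM_MC_ENDIF();

   expands (roughly, ignoring the do/while plumbing) into

        off = iemNativeEmitIfEflagsBitSet(pReNative, off, X86_EFL_ZF);  // pushes a cond stack entry
        ...                                                             // if-block emitters
        off = iemNativeEmitElse(pReNative, off);                        // jmp endif; define else label
        ...                                                             // else-block emitters
        off = iemNativeEmitEndIf(pReNative, off);                       // reconcile states, pop entry

   The do/while pairs in the macros exist purely so the braces in the microcode
   blocks balance; they emit no code themselves. */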


#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) \
    off = iemNativeEmitIfEflagAnysBitsSet(pReNative, off, (a_fBits)); \
    do {

/** Emits code for IEM_MC_IF_EFL_ANY_BITS_SET. */
DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagAnysBitsSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitsInEfl)
{
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitsInEfl);
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    /* Get the eflags. */
    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                              kIemNativeGstRegUse_ReadOnly);

    /* Test and jump. */
    off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfNoneSet(pReNative, off, idxEflReg, fBitsInEfl, pEntry->idxLabelElse);

    /* Free but don't flush the EFlags register. */
    iemNativeRegFreeTmp(pReNative, idxEflReg);

    /* Make a copy of the core state now as we start the if-block. */
    iemNativeCondStartIfBlock(pReNative, off);

    return off;
}


#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) \
    off = iemNativeEmitIfEflagNoBitsSet(pReNative, off, (a_fBits)); \
    do {

/** Emits code for IEM_MC_IF_EFL_NO_BITS_SET. */
DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagNoBitsSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitsInEfl)
{
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitsInEfl);
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    /* Get the eflags. */
    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                              kIemNativeGstRegUse_ReadOnly);

    /* Test and jump. */
    off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(pReNative, off, idxEflReg, fBitsInEfl, pEntry->idxLabelElse);

    /* Free but don't flush the EFlags register. */
    iemNativeRegFreeTmp(pReNative, idxEflReg);

    /* Make a copy of the core state now as we start the if-block. */
    iemNativeCondStartIfBlock(pReNative, off);

    return off;
}


#define IEM_MC_IF_EFL_BIT_SET(a_fBit) \
    off = iemNativeEmitIfEflagsBitSet(pReNative, off, (a_fBit)); \
    do {

/** Emits code for IEM_MC_IF_EFL_BIT_SET. */
DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagsBitSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl)
{
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    /* Get the eflags. */
    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                              kIemNativeGstRegUse_ReadOnly);

    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
    Assert(RT_BIT_32(iBitNo) == fBitInEfl);

    /* Test and jump. */
    off = iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet(pReNative, off, idxEflReg, iBitNo, pEntry->idxLabelElse);

    /* Free but don't flush the EFlags register. */
    iemNativeRegFreeTmp(pReNative, idxEflReg);

    /* Make a copy of the core state now as we start the if-block. */
    iemNativeCondStartIfBlock(pReNative, off);

    return off;
}


#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) \
    off = iemNativeEmitIfEflagsBitNotSet(pReNative, off, (a_fBit)); \
    do {

/** Emits code for IEM_MC_IF_EFL_BIT_NOT_SET. */
DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagsBitNotSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl)
{
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    /* Get the eflags. */
    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                              kIemNativeGstRegUse_ReadOnly);

    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
    Assert(RT_BIT_32(iBitNo) == fBitInEfl);

    /* Test and jump. */
    off = iemNativeEmitTestBitInGprAndJmpToLabelIfSet(pReNative, off, idxEflReg, iBitNo, pEntry->idxLabelElse);

    /* Free but don't flush the EFlags register. */
    iemNativeRegFreeTmp(pReNative, idxEflReg);

    /* Make a copy of the core state now as we start the if-block. */
    iemNativeCondStartIfBlock(pReNative, off);

    return off;
}


#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
    off = iemNativeEmitIfEflagsTwoBitsEqual(pReNative, off, a_fBit1, a_fBit2, false /*fInverted*/); \
    do {

#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
    off = iemNativeEmitIfEflagsTwoBitsEqual(pReNative, off, a_fBit1, a_fBit2, true /*fInverted*/); \
    do {

/** Emits code for IEM_MC_IF_EFL_BITS_EQ and IEM_MC_IF_EFL_BITS_NE. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitIfEflagsTwoBitsEqual(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                  uint32_t fBit1InEfl, uint32_t fBit2InEfl, bool fInverted)
{
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBit1InEfl | fBit2InEfl);
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    /* Get the eflags. */
    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                              kIemNativeGstRegUse_ReadOnly);

    unsigned const iBitNo1 = ASMBitFirstSetU32(fBit1InEfl) - 1;
    Assert(RT_BIT_32(iBitNo1) == fBit1InEfl);

    unsigned const iBitNo2 = ASMBitFirstSetU32(fBit2InEfl) - 1;
    Assert(RT_BIT_32(iBitNo2) == fBit2InEfl);
    Assert(iBitNo1 != iBitNo2);

#ifdef RT_ARCH_AMD64
    uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBit1InEfl);

    off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, idxTmpReg, idxEflReg);
    if (iBitNo1 > iBitNo2)
        off = iemNativeEmitShiftGpr32Right(pReNative, off, idxTmpReg, iBitNo1 - iBitNo2);
    else
        off = iemNativeEmitShiftGpr32Left(pReNative, off, idxTmpReg, iBitNo2 - iBitNo1);
    off = iemNativeEmitXorGpr32ByGpr32(pReNative, off, idxTmpReg, idxEflReg);

#elif defined(RT_ARCH_ARM64)
    uint8_t const    idxTmpReg   = iemNativeRegAllocTmp(pReNative, &off);
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);

    /* and tmpreg, eflreg, #1<<iBitNo1 */
    pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(idxTmpReg, idxEflReg, 0 /*uImm7SizeLen -> 32*/, 32 - iBitNo1, false /*f64Bit*/);

    /* eeyore tmpreg, eflreg, tmpreg, LSL/LSR, #abs(iBitNo2 - iBitNo1) */
    if (iBitNo1 > iBitNo2)
        pu32CodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxEflReg, idxTmpReg, false /*64bit*/,
                                                iBitNo1 - iBitNo2, kArmv8A64InstrShift_Lsr);
    else
        pu32CodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxEflReg, idxTmpReg, false /*64bit*/,
                                                iBitNo2 - iBitNo1, kArmv8A64InstrShift_Lsl);

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#else
# error "Port me"
#endif

    /* Test (bit #2 is set in tmpreg if not-equal) and jump. */
    off = iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, idxTmpReg, iBitNo2,
                                                     pEntry->idxLabelElse, !fInverted /*fJmpIfSet*/);

    /* Free but don't flush the EFlags and tmp registers. */
    iemNativeRegFreeTmp(pReNative, idxTmpReg);
    iemNativeRegFreeTmp(pReNative, idxEflReg);

    /* Make a copy of the core state now as we start the if-block. */
    iemNativeCondStartIfBlock(pReNative, off);

    return off;
}
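
/* The bit-equality trick above in plain C, for reference (a sketch):

        uint32_t fTmp = fEfl & RT_BIT_32(iBitNo1);      // isolate bit 1
        if (iBitNo1 > iBitNo2)
            fTmp >>= iBitNo1 - iBitNo2;                 // move it to bit 2's position
        else
            fTmp <<= iBitNo2 - iBitNo1;
        fTmp ^= fEfl;                                   // bit iBitNo2 of fTmp is now (bit1 != bit2)

   so testing bit iBitNo2 of the temporary distinguishes EQ from NE with a
   single conditional branch. */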


#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
    off = iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(pReNative, off, a_fBit, a_fBit1, a_fBit2, false /*fInverted*/); \
    do {

#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
    off = iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(pReNative, off, a_fBit, a_fBit1, a_fBit2, true /*fInverted*/); \
    do {

/** Emits code for IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ and
 *  IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl,
                                              uint32_t fBit1InEfl, uint32_t fBit2InEfl, bool fInverted)
{
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl | fBit1InEfl | fBit2InEfl);
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    /* We need an if-block label for the inverted variant. */
    uint32_t const idxLabelIf = fInverted ? iemNativeLabelCreate(pReNative, kIemNativeLabelType_If, UINT32_MAX,
                                                                 pReNative->paLabels[pEntry->idxLabelElse].uData) : UINT32_MAX;

    /* Get the eflags. */
    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                              kIemNativeGstRegUse_ReadOnly);

    /* Translate the flag masks to bit numbers. */
    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
    Assert(RT_BIT_32(iBitNo) == fBitInEfl);

    unsigned const iBitNo1 = ASMBitFirstSetU32(fBit1InEfl) - 1;
    Assert(RT_BIT_32(iBitNo1) == fBit1InEfl);
    Assert(iBitNo1 != iBitNo);

    unsigned const iBitNo2 = ASMBitFirstSetU32(fBit2InEfl) - 1;
    Assert(RT_BIT_32(iBitNo2) == fBit2InEfl);
    Assert(iBitNo2 != iBitNo);
    Assert(iBitNo2 != iBitNo1);

#ifdef RT_ARCH_AMD64
    uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBit1InEfl); /* This must come before we jump anywhere! */
#elif defined(RT_ARCH_ARM64)
    uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
#endif

    /* Check for the lone bit first. */
    if (!fInverted)
        off = iemNativeEmitTestBitInGprAndJmpToLabelIfSet(pReNative, off, idxEflReg, iBitNo, pEntry->idxLabelElse);
    else
        off = iemNativeEmitTestBitInGprAndJmpToLabelIfSet(pReNative, off, idxEflReg, iBitNo, idxLabelIf);

    /* Then extract and compare the other two bits. */
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, idxTmpReg, idxEflReg);
    if (iBitNo1 > iBitNo2)
        off = iemNativeEmitShiftGpr32Right(pReNative, off, idxTmpReg, iBitNo1 - iBitNo2);
    else
        off = iemNativeEmitShiftGpr32Left(pReNative, off, idxTmpReg, iBitNo2 - iBitNo1);
    off = iemNativeEmitXorGpr32ByGpr32(pReNative, off, idxTmpReg, idxEflReg);

#elif defined(RT_ARCH_ARM64)
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);

    /* and tmpreg, eflreg, #1<<iBitNo1 */
    pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(idxTmpReg, idxEflReg, 0 /*uImm7SizeLen -> 32*/, 32 - iBitNo1, false /*f64Bit*/);

    /* eeyore tmpreg, eflreg, tmpreg, LSL/LSR, #abs(iBitNo2 - iBitNo1) */
    if (iBitNo1 > iBitNo2)
        pu32CodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxEflReg, idxTmpReg, false /*64bit*/,
                                                iBitNo1 - iBitNo2, kArmv8A64InstrShift_Lsr);
    else
        pu32CodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxEflReg, idxTmpReg, false /*64bit*/,
                                                iBitNo2 - iBitNo1, kArmv8A64InstrShift_Lsl);

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#else
# error "Port me"
#endif

    /* Test (bit #2 is set in tmpreg if not-equal) and jump. */
    off = iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, idxTmpReg, iBitNo2,
                                                     pEntry->idxLabelElse, !fInverted /*fJmpIfSet*/);

    /* Free but don't flush the EFlags and tmp registers. */
    iemNativeRegFreeTmp(pReNative, idxTmpReg);
    iemNativeRegFreeTmp(pReNative, idxEflReg);

    /* Make a copy of the core state now as we start the if-block. */
    iemNativeCondStartIfBlock(pReNative, off, idxLabelIf);

    return off;
}


#define IEM_MC_IF_CX_IS_NZ() \
    off = iemNativeEmitIfCxIsNotZero(pReNative, off); \
    do {

/** Emits code for IEM_MC_IF_CX_IS_NZ. */
DECL_INLINE_THROW(uint32_t) iemNativeEmitIfCxIsNotZero(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
                                                                 kIemNativeGstRegUse_ReadOnly);
    off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfNoneSet(pReNative, off, idxGstRcxReg, UINT16_MAX, pEntry->idxLabelElse);
    iemNativeRegFreeTmp(pReNative, idxGstRcxReg);

    iemNativeCondStartIfBlock(pReNative, off);
    return off;
}


#define IEM_MC_IF_ECX_IS_NZ() \
    off = iemNativeEmitIfRcxEcxIsNotZero(pReNative, off, false /*f64Bit*/); \
    do {

#define IEM_MC_IF_RCX_IS_NZ() \
    off = iemNativeEmitIfRcxEcxIsNotZero(pReNative, off, true /*f64Bit*/); \
    do {

/** Emits code for IEM_MC_IF_ECX_IS_NZ and IEM_MC_IF_RCX_IS_NZ. */
DECL_INLINE_THROW(uint32_t) iemNativeEmitIfRcxEcxIsNotZero(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool f64Bit)
{
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
                                                                 kIemNativeGstRegUse_ReadOnly);
    off = iemNativeEmitTestIfGprIsZeroAndJmpToLabel(pReNative, off, idxGstRcxReg, f64Bit, pEntry->idxLabelElse);
    iemNativeRegFreeTmp(pReNative, idxGstRcxReg);

    iemNativeCondStartIfBlock(pReNative, off);
    return off;
}


#define IEM_MC_IF_CX_IS_NOT_ONE() \
    off = iemNativeEmitIfCxIsNotOne(pReNative, off); \
    do {

/** Emits code for IEM_MC_IF_CX_IS_NOT_ONE. */
DECL_INLINE_THROW(uint32_t) iemNativeEmitIfCxIsNotOne(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
                                                                 kIemNativeGstRegUse_ReadOnly);
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitTestIfGpr16EqualsImmAndJmpToLabel(pReNative, off, idxGstRcxReg, 1, pEntry->idxLabelElse);
#else
    uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
    off = iemNativeEmitTestIfGpr16EqualsImmAndJmpToLabel(pReNative, off, idxGstRcxReg, 1, pEntry->idxLabelElse, idxTmpReg);
    iemNativeRegFreeTmp(pReNative, idxTmpReg);
#endif
    iemNativeRegFreeTmp(pReNative, idxGstRcxReg);

    iemNativeCondStartIfBlock(pReNative, off);
    return off;
}


#define IEM_MC_IF_ECX_IS_NOT_ONE() \
    off = iemNativeEmitIfRcxEcxIsNotOne(pReNative, off, false /*f64Bit*/); \
    do {

#define IEM_MC_IF_RCX_IS_NOT_ONE() \
    off = iemNativeEmitIfRcxEcxIsNotOne(pReNative, off, true /*f64Bit*/); \
    do {

/** Emits code for IEM_MC_IF_ECX_IS_NOT_ONE and IEM_MC_IF_RCX_IS_NOT_ONE. */
DECL_INLINE_THROW(uint32_t) iemNativeEmitIfRcxEcxIsNotOne(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool f64Bit)
{
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
                                                                 kIemNativeGstRegUse_ReadOnly);
    if (f64Bit)
        off = iemNativeEmitTestIfGprEqualsImmAndJmpToLabel(pReNative, off, idxGstRcxReg, 1, pEntry->idxLabelElse);
    else
        off = iemNativeEmitTestIfGpr32EqualsImmAndJmpToLabel(pReNative, off, idxGstRcxReg, 1, pEntry->idxLabelElse);
    iemNativeRegFreeTmp(pReNative, idxGstRcxReg);

    iemNativeCondStartIfBlock(pReNative, off);
    return off;
}


#define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
    off = iemNativeEmitIfCxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, true /*fCheckIfSet*/); \
    do {

#define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
    off = iemNativeEmitIfCxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, false /*fCheckIfSet*/); \
    do {

/** Emits code for IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_SET and
 *  IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitIfCxIsNotOneAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl, bool fCheckIfSet)
{
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    /* We have to load both RCX and EFLAGS before we can start branching,
       otherwise we'll end up in the else-block with an inconsistent
       register allocator state.
       Doing EFLAGS first as it's more likely to be loaded, right? */
    uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                                 kIemNativeGstRegUse_ReadOnly);
    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
                                                                 kIemNativeGstRegUse_ReadOnly);

    /** @todo we could reduce this to a single branch instruction by spending a
     *        temporary register and some setnz stuff.  Not sure if loops are
     *        worth it. */
    /* Check CX. */
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitTestIfGpr16EqualsImmAndJmpToLabel(pReNative, off, idxGstRcxReg, 1, pEntry->idxLabelElse);
#else
    uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
    off = iemNativeEmitTestIfGpr16EqualsImmAndJmpToLabel(pReNative, off, idxGstRcxReg, 1, pEntry->idxLabelElse, idxTmpReg);
    iemNativeRegFreeTmp(pReNative, idxTmpReg);
#endif

    /* Check the EFlags bit. */
    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
    Assert(RT_BIT_32(iBitNo) == fBitInEfl);
    off = iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, idxEflReg, iBitNo, pEntry->idxLabelElse,
                                                     !fCheckIfSet /*fJmpIfSet*/);

    iemNativeRegFreeTmp(pReNative, idxGstRcxReg);
    iemNativeRegFreeTmp(pReNative, idxEflReg);

    iemNativeCondStartIfBlock(pReNative, off);
    return off;
}


#define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, true /*fCheckIfSet*/, false /*f64Bit*/); \
    do {

#define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, false /*fCheckIfSet*/, false /*f64Bit*/); \
    do {

#define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, true /*fCheckIfSet*/, true /*f64Bit*/); \
    do {

#define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, false /*fCheckIfSet*/, true /*f64Bit*/); \
    do {

/** Emits code for IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_SET,
 *  IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET,
 *  IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_SET and
 *  IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                              uint32_t fBitInEfl, bool fCheckIfSet, bool f64Bit)
{
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    /* We have to load both RCX and EFLAGS before we can start branching,
       otherwise we'll end up in the else-block with an inconsistent
       register allocator state.
       Doing EFLAGS first as it's more likely to be loaded, right? */
    uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                                 kIemNativeGstRegUse_ReadOnly);
    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
                                                                 kIemNativeGstRegUse_ReadOnly);

    /** @todo we could reduce this to a single branch instruction by spending a
     *        temporary register and some setnz stuff.  Not sure if loops are
     *        worth it. */
    /* Check RCX/ECX. */
    if (f64Bit)
        off = iemNativeEmitTestIfGprEqualsImmAndJmpToLabel(pReNative, off, idxGstRcxReg, 1, pEntry->idxLabelElse);
    else
        off = iemNativeEmitTestIfGpr32EqualsImmAndJmpToLabel(pReNative, off, idxGstRcxReg, 1, pEntry->idxLabelElse);

    /* Check the EFlags bit. */
    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
    Assert(RT_BIT_32(iBitNo) == fBitInEfl);
    off = iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, idxEflReg, iBitNo, pEntry->idxLabelElse,
                                                     !fCheckIfSet /*fJmpIfSet*/);

    iemNativeRegFreeTmp(pReNative, idxGstRcxReg);
    iemNativeRegFreeTmp(pReNative, idxEflReg);

    iemNativeCondStartIfBlock(pReNative, off);
    return off;
}


#define IEM_MC_IF_LOCAL_IS_Z(a_Local) \
    off = iemNativeEmitIfLocalIsZ(pReNative, off, a_Local); \
    do {

/** Emits code for IEM_MC_IF_LOCAL_IS_Z. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitIfLocalIsZ(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarLocal)
{
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarLocal);
    PIEMNATIVEVAR const pVarRc = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarLocal)];
    AssertStmt(pVarRc->uArgNo == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_8));
    AssertStmt(pVarRc->cbVar == sizeof(int32_t), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_9));

    uint8_t const idxReg = iemNativeVarRegisterAcquire(pReNative, idxVarLocal, &off);

    off = iemNativeEmitTestIfGprIsNotZeroAndJmpToLabel(pReNative, off, idxReg, false /*f64Bit*/, pEntry->idxLabelElse);

    iemNativeVarRegisterRelease(pReNative, idxVarLocal);

    iemNativeCondStartIfBlock(pReNative, off);
    return off;
}


#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR

#define IEM_MC_IF_MXCSR_XCPT_PENDING() \
    off = iemNativeEmitIfMxcsrXcptPending(pReNative, off); \
    do {

/** Emits code for IEM_MC_IF_MXCSR_XCPT_PENDING. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitIfMxcsrXcptPending(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);

    uint8_t const idxGstMxcsrReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_MxCsr,
                                                                   kIemNativeGstRegUse_Calculation);
    uint8_t const idxRegTmp      = iemNativeRegAllocTmp(pReNative, &off);

    /* mov tmp0, mxcsr */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegTmp, idxGstMxcsrReg);
    /* tmp0 &= X86_MXCSR_XCPT_FLAGS */
    off = iemNativeEmitAndGprByImm(pReNative, off, idxRegTmp, X86_MXCSR_XCPT_FLAGS);
    /* mxcsr &= X86_MXCSR_XCPT_MASK */
    off = iemNativeEmitAndGprByImm(pReNative, off, idxGstMxcsrReg, X86_MXCSR_XCPT_MASK);
    /* mxcsr = ~mxcsr */
    off = iemNativeEmitInvBitsGpr(pReNative, off, idxGstMxcsrReg, idxGstMxcsrReg);
    /* mxcsr >>= X86_MXCSR_XCPT_MASK_SHIFT */
    off = iemNativeEmitShiftGprRight(pReNative, off, idxGstMxcsrReg, X86_MXCSR_XCPT_MASK_SHIFT);
    /* tmp0 &= mxcsr */
    off = iemNativeEmitAndGprByGpr(pReNative, off, idxRegTmp, idxGstMxcsrReg);

    off = iemNativeEmitTestIfGprIsZeroAndJmpToLabel(pReNative, off, idxRegTmp, true /*f64Bit*/, pEntry->idxLabelElse);
    iemNativeRegFreeTmp(pReNative, idxGstMxcsrReg);
    iemNativeRegFreeTmp(pReNative, idxRegTmp);

    iemNativeCondStartIfBlock(pReNative, off);
    return off;
}

#endif
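
/* What the emitted sequence above computes, in plain C (a sketch):

        uint32_t const fXcptFlags = uMxCsr & X86_MXCSR_XCPT_FLAGS;      // pending exception flags (low bits)
        uint32_t const fXcptMasks = (uMxCsr & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT;
        bool     const fPending   = (fXcptFlags & ~fXcptMasks) != 0;    // any unmasked exception flagged?

   The inversion happens before the shift in the emitted code; the garbage high
   bits that brings along are harmless because the first AND already confined
   tmp0 to the flag bits. */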


/*********************************************************************************************************************************
*   Emitters for IEM_MC_ARG_XXX, IEM_MC_LOCAL, IEM_MC_LOCAL_CONST, ++                                                           *
*********************************************************************************************************************************/

#define IEM_MC_NOREF(a_Name) \
    RT_NOREF_PV(a_Name)

#define IEM_MC_ARG(a_Type, a_Name, a_iArg) \
    uint8_t const a_Name = iemNativeArgAlloc(pReNative, (a_iArg), sizeof(a_Type))

#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) \
    uint8_t const a_Name = iemNativeArgAllocConst(pReNative, (a_iArg), sizeof(a_Type), (a_Value))

#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) \
    uint8_t const a_Name = iemNativeArgAllocLocalRef(pReNative, (a_iArg), (a_Local))

#define IEM_MC_LOCAL(a_Type, a_Name) \
    uint8_t const a_Name = iemNativeVarAlloc(pReNative, sizeof(a_Type))

#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) \
    uint8_t const a_Name = iemNativeVarAllocConst(pReNative, sizeof(a_Type), (a_Value))

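/* Usage sketch (hypothetical microcode statements, for orientation): in a
   recompiled MC block these macros allocate recompiler variable indices rather
   than actual storage, e.g.

        IEM_MC_LOCAL(uint32_t, u32Dst);                 // u32Dst is a uint8_t variable index
        IEM_MC_ARG(uint32_t *,  pu32Dst, 0);            // argument slot 0 for a helper call
        IEM_MC_ARG_CONST(uint32_t, u32Src, uImm, 1);    // argument slot 1, constant value

   Host registers are only bound to these indices lazily, when an emitter
   actually needs the value. */
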
/**
 * Sets the host register for @a idxVar to @a idxReg.
 *
 * The register must not be allocated.  Any guest register shadowing will be
 * implicitly dropped by this call.
 *
 * The variable must not have any register associated with it (causes
 * VERR_IEM_VAR_IPE_10 to be raised).  Conversion to a stack variable is
 * implied.
 *
 * @returns idxReg
 * @param   pReNative   The recompiler state.
 * @param   idxVar      The variable.
 * @param   idxReg      The host register (typically IEMNATIVE_CALL_RET_GREG).
 * @param   off         For recording in debug info.
 *
 * @throws  VERR_IEM_VAR_IPE_10, VERR_IEM_VAR_IPE_11
 */
DECL_INLINE_THROW(uint8_t) iemNativeVarRegisterSet(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint8_t idxReg, uint32_t off)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
    Assert(!pVar->fRegAcquired);
    Assert(idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
    AssertStmt(pVar->idxReg == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_10));
    AssertStmt(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxReg)), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_11));

    iemNativeRegClearGstRegShadowing(pReNative, idxReg, off);
    iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Var, idxVar);

    iemNativeVarSetKindToStack(pReNative, idxVar);
    pVar->idxReg = idxReg;

    return idxReg;
}


/**
 * A convenient helper function.
 */
DECL_INLINE_THROW(uint8_t) iemNativeVarRegisterSetAndAcquire(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar,
                                                             uint8_t idxReg, uint32_t *poff)
{
    idxReg = iemNativeVarRegisterSet(pReNative, idxVar, idxReg, *poff);
    pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fRegAcquired = true;
    return idxReg;
}


/**
 * This is called by IEM_MC_END() to clean up all variables.
 */
DECL_FORCE_INLINE(void) iemNativeVarFreeAll(PIEMRECOMPILERSTATE pReNative)
{
    uint32_t const bmVars = pReNative->Core.bmVars;
    if (bmVars != 0)
        iemNativeVarFreeAllSlow(pReNative, bmVars);
    Assert(pReNative->Core.u64ArgVars == UINT64_MAX);
    Assert(pReNative->Core.bmStack == 0);
}


#define IEM_MC_FREE_LOCAL(a_Name)   iemNativeVarFreeLocal(pReNative, a_Name)

/**
 * This is called by IEM_MC_FREE_LOCAL.
 */
DECLINLINE(void) iemNativeVarFreeLocal(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].uArgNo == UINT8_MAX);
    iemNativeVarFreeOneWorker(pReNative, IEMNATIVE_VAR_IDX_UNPACK(idxVar));
}


#define IEM_MC_FREE_ARG(a_Name)     iemNativeVarFreeArg(pReNative, a_Name)

/**
 * This is called by IEM_MC_FREE_ARG.
 */
DECLINLINE(void) iemNativeVarFreeArg(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].uArgNo < RT_ELEMENTS(pReNative->Core.aidxArgVars));
    iemNativeVarFreeOneWorker(pReNative, IEMNATIVE_VAR_IDX_UNPACK(idxVar));
}


#define IEM_MC_ASSIGN_TO_SMALLER(a_VarDst, a_VarSrcEol) off = iemNativeVarAssignToSmaller(pReNative, off, a_VarDst, a_VarSrcEol)

/**
 * This is called by IEM_MC_ASSIGN_TO_SMALLER.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeVarAssignToSmaller(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint8_t idxVarSrc)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarDst);
    PIEMNATIVEVAR const pVarDst = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarDst)];
    AssertStmt(pVarDst->enmKind == kIemNativeVarKind_Invalid, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
    Assert(   pVarDst->cbVar == sizeof(uint16_t)
           || pVarDst->cbVar == sizeof(uint32_t));

    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarSrc);
    PIEMNATIVEVAR const pVarSrc = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarSrc)];
    AssertStmt(   pVarSrc->enmKind == kIemNativeVarKind_Stack
               || pVarSrc->enmKind == kIemNativeVarKind_Immediate,
               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));

    Assert(pVarDst->cbVar < pVarSrc->cbVar);

    /*
     * Special case for immediates.
     */
    if (pVarSrc->enmKind == kIemNativeVarKind_Immediate)
    {
        switch (pVarDst->cbVar)
        {
            case sizeof(uint16_t):
                iemNativeVarSetKindToConst(pReNative, idxVarDst, (uint16_t)pVarSrc->u.uValue);
                break;
            case sizeof(uint32_t):
                iemNativeVarSetKindToConst(pReNative, idxVarDst, (uint32_t)pVarSrc->u.uValue);
                break;
            default: AssertFailed(); break;
        }
    }
    else
    {
        /*
         * The generic solution for now.
         */
        /** @todo optimize this by having the python script make sure the source
         *        variable passed to IEM_MC_ASSIGN_TO_SMALLER is not used after the
         *        statement.  Then we could just transfer the register assignments. */
        uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off);
        uint8_t const idxRegSrc = iemNativeVarRegisterAcquire(pReNative, idxVarSrc, &off);
        switch (pVarDst->cbVar)
        {
            case sizeof(uint16_t):
                off = iemNativeEmitLoadGprFromGpr16(pReNative, off, idxRegDst, idxRegSrc);
                break;
            case sizeof(uint32_t):
                off = iemNativeEmitLoadGprFromGpr32(pReNative, off, idxRegDst, idxRegSrc);
                break;
            default: AssertFailed(); break;
        }
        iemNativeVarRegisterRelease(pReNative, idxVarSrc);
        iemNativeVarRegisterRelease(pReNative, idxVarDst);
    }
    return off;
}
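
/* Usage sketch (hypothetical): narrowing a 32-bit local into a freshly
   allocated 16-bit one,

        IEM_MC_LOCAL(uint32_t, u32Value);
        ...
        IEM_MC_LOCAL(uint16_t, u16Value);
        IEM_MC_ASSIGN_TO_SMALLER(u16Value, u32Value);

   which either folds the truncation into a constant (immediate source) or
   emits a single 16-bit register-to-register load as above. */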



/*********************************************************************************************************************************
*   Emitters for IEM_MC_CALL_CIMPL_XXX                                                                                          *
*********************************************************************************************************************************/

/** Common emit function for IEM_MC_CALL_CIMPL_XXXX. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallCImplCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, uint8_t idxInstr,
                             uint64_t fGstShwFlush, uintptr_t pfnCImpl, uint8_t cArgs)
{
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, X86_EFL_STATUS_BITS);

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    /* Clear the appropriate check emitted flags when a helper is called which could modify a control register. */
    if (pfnCImpl == (uintptr_t)iemCImpl_xsetbv) /* Modifies xcr0 which only the AVX check uses. */
        pReNative->fSimdRaiseXcptChecksEmitted &= ~IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_AVX;
    else if (pfnCImpl == (uintptr_t)iemCImpl_mov_Cd_Rd) /* Can modify cr4 which all checks use. */
        pReNative->fSimdRaiseXcptChecksEmitted = 0;
    else if (   pfnCImpl == (uintptr_t)iemCImpl_FarJmp
             || pfnCImpl == (uintptr_t)iemCImpl_callf
             || pfnCImpl == (uintptr_t)iemCImpl_lmsw
             || pfnCImpl == (uintptr_t)iemCImpl_clts) /* Will only modify cr0 */
        pReNative->fSimdRaiseXcptChecksEmitted &= ~(  IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_AVX
                                                    | IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_SSE
                                                    | IEMNATIVE_SIMD_RAISE_XCPT_CHECKS_EMITTED_MAYBE_DEVICE_NOT_AVAILABLE);
#endif

    /*
     * Do all the call setup and cleanup.
     */
    off = iemNativeEmitCallCommon(pReNative, off, cArgs + IEM_CIMPL_HIDDEN_ARGS, IEM_CIMPL_HIDDEN_ARGS);

    /*
     * Load the two or three hidden arguments.
     */
#if defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
    off = iemNativeEmitLeaGprByBp(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
    off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, cbInstr);
#else
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
    off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, cbInstr);
#endif

    /*
     * Make the call and check the return code.
     *
     * Shadow PC copies are always flushed here, other stuff depends on flags.
     * Segment and general purpose registers are explicitly flushed via the
     * IEM_MC_HINT_FLUSH_GUEST_SHADOW_GREG and IEM_MC_HINT_FLUSH_GUEST_SHADOW_SREG
     * macros.
     */
    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)pfnCImpl);
#if defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
    off = iemNativeEmitLoadGprByBpU32(pReNative, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
#endif
    fGstShwFlush = iemNativeCImplFlagsToGuestShadowFlushMask(pReNative->fCImpl, fGstShwFlush | RT_BIT_64(kIemNativeGstReg_Pc));
    if (!(pReNative->fMc & IEM_MC_F_WITHOUT_FLAGS)) /** @todo We don't emit with-flags/without-flags variations for CIMPL calls. */
        fGstShwFlush |= RT_BIT_64(kIemNativeGstReg_EFlags);
    iemNativeRegFlushGuestShadows(pReNative, fGstShwFlush);

    return iemNativeEmitCheckCallRetAndPassUp(pReNative, off, idxInstr);
}
2139 |
|
---|
2140 |
|
---|
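/*
 * Note: given the hidden argument loads above, the CImpl helpers invoked
 * through pfnCImpl are assumed to follow the usual IEM convention of taking
 * the VCpu pointer and the instruction length first, roughly:
 *
 *      VBOXSTRICTRC iemCImpl_SomeOp(PVMCPUCC pVCpu, uint8_t cbInstr, ...);
 *
 * (iemCImpl_SomeOp is a made-up name.)  Any explicit IEM_MC_CALL_CIMPL_X
 * arguments follow cbInstr; on Windows/AMD64 strict builds an extra
 * VBOXSTRICTRC return buffer is passed as the first argument instead, as
 * handled by the VBOXSTRICTRC_STRICT_ENABLED paths above.
 */
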
#define IEM_MC_CALL_CIMPL_1_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
    off = iemNativeEmitCallCImpl1(pReNative, off, a_cbInstr, pCallEntry->idxInstr, a_fGstShwFlush, (uintptr_t)a_pfnCImpl, a0)

/** Emits code for IEM_MC_CALL_CIMPL_1. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallCImpl1(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, uint8_t idxInstr, uint64_t fGstShwFlush,
                        uintptr_t pfnCImpl, uint8_t idxArg0)
{
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg0, 0 + IEM_CIMPL_HIDDEN_ARGS);
    return iemNativeEmitCallCImplCommon(pReNative, off, cbInstr, idxInstr, fGstShwFlush, pfnCImpl, 1);
}


#define IEM_MC_CALL_CIMPL_2_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
    off = iemNativeEmitCallCImpl2(pReNative, off, a_cbInstr, pCallEntry->idxInstr, a_fGstShwFlush, (uintptr_t)a_pfnCImpl, a0, a1)

/** Emits code for IEM_MC_CALL_CIMPL_2. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallCImpl2(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, uint8_t idxInstr, uint64_t fGstShwFlush,
                        uintptr_t pfnCImpl, uint8_t idxArg0, uint8_t idxArg1)
{
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg0, 0 + IEM_CIMPL_HIDDEN_ARGS);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg1, 1 + IEM_CIMPL_HIDDEN_ARGS);
    return iemNativeEmitCallCImplCommon(pReNative, off, cbInstr, idxInstr, fGstShwFlush, pfnCImpl, 2);
}


#define IEM_MC_CALL_CIMPL_3_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
    off = iemNativeEmitCallCImpl3(pReNative, off, a_cbInstr, pCallEntry->idxInstr, a_fGstShwFlush, \
                                  (uintptr_t)a_pfnCImpl, a0, a1, a2)

/** Emits code for IEM_MC_CALL_CIMPL_3. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallCImpl3(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, uint8_t idxInstr, uint64_t fGstShwFlush,
                        uintptr_t pfnCImpl, uint8_t idxArg0, uint8_t idxArg1, uint8_t idxArg2)
{
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg0, 0 + IEM_CIMPL_HIDDEN_ARGS);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg1, 1 + IEM_CIMPL_HIDDEN_ARGS);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg2, 2 + IEM_CIMPL_HIDDEN_ARGS);
    return iemNativeEmitCallCImplCommon(pReNative, off, cbInstr, idxInstr, fGstShwFlush, pfnCImpl, 3);
}


#define IEM_MC_CALL_CIMPL_4_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
    off = iemNativeEmitCallCImpl4(pReNative, off, a_cbInstr, pCallEntry->idxInstr, a_fGstShwFlush, \
                                  (uintptr_t)a_pfnCImpl, a0, a1, a2, a3)

/** Emits code for IEM_MC_CALL_CIMPL_4. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallCImpl4(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, uint8_t idxInstr, uint64_t fGstShwFlush,
                        uintptr_t pfnCImpl, uint8_t idxArg0, uint8_t idxArg1, uint8_t idxArg2, uint8_t idxArg3)
{
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg0, 0 + IEM_CIMPL_HIDDEN_ARGS);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg1, 1 + IEM_CIMPL_HIDDEN_ARGS);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg2, 2 + IEM_CIMPL_HIDDEN_ARGS);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg3, 3 + IEM_CIMPL_HIDDEN_ARGS);
    return iemNativeEmitCallCImplCommon(pReNative, off, cbInstr, idxInstr, fGstShwFlush, pfnCImpl, 4);
}


#define IEM_MC_CALL_CIMPL_5_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
    off = iemNativeEmitCallCImpl5(pReNative, off, a_cbInstr, pCallEntry->idxInstr, a_fGstShwFlush, \
                                  (uintptr_t)a_pfnCImpl, a0, a1, a2, a3, a4)

/** Emits code for IEM_MC_CALL_CIMPL_5. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallCImpl5(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, uint8_t idxInstr, uint64_t fGstShwFlush,
                        uintptr_t pfnCImpl, uint8_t idxArg0, uint8_t idxArg1, uint8_t idxArg2, uint8_t idxArg3, uint8_t idxArg4)
{
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg0, 0 + IEM_CIMPL_HIDDEN_ARGS);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg1, 1 + IEM_CIMPL_HIDDEN_ARGS);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg2, 2 + IEM_CIMPL_HIDDEN_ARGS);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg3, 3 + IEM_CIMPL_HIDDEN_ARGS);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg4, 4 + IEM_CIMPL_HIDDEN_ARGS);
    return iemNativeEmitCallCImplCommon(pReNative, off, cbInstr, idxInstr, fGstShwFlush, pfnCImpl, 5);
}


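/*
 * Illustration (hypothetical invocation): a threaded call such as
 *
 *      IEM_MC_CALL_CIMPL_1_THREADED(a_cbInstr, a_fFlags, a_fGstShwFlush, iemCImpl_SomeOp, idxVar0)
 *
 * simply expands to
 *
 *      off = iemNativeEmitCallCImpl1(pReNative, off, a_cbInstr, pCallEntry->idxInstr,
 *                                    a_fGstShwFlush, (uintptr_t)iemCImpl_SomeOp, idxVar0);
 *
 * where idxVar0 must be a variable registered as argument number
 * IEM_CIMPL_HIDDEN_ARGS + 0 (asserted above) and iemCImpl_SomeOp is a
 * made-up helper name.  Note that a_fFlags is not used by these emitters.
 */
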
/** Recompiler debugging: Flush guest register shadow copies. */
#define IEM_MC_HINT_FLUSH_GUEST_SHADOW(g_fGstShwFlush) iemNativeRegFlushGuestShadows(pReNative, g_fGstShwFlush)



/*********************************************************************************************************************************
*   Emitters for IEM_MC_CALL_VOID_AIMPL_XXX and IEM_MC_CALL_AIMPL_XXX                                                            *
*********************************************************************************************************************************/

/**
 * Common worker for IEM_MC_CALL_VOID_AIMPL_XXX and IEM_MC_CALL_AIMPL_XXX.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallAImplCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRc,
                             uintptr_t pfnAImpl, uint8_t cArgs)
{
    if (idxVarRc != UINT8_MAX)
    {
        IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarRc);
        PIEMNATIVEVAR const pVarRc = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarRc)];
        AssertStmt(pVarRc->uArgNo == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_8));
        AssertStmt(pVarRc->cbVar <= sizeof(uint64_t), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_9));
    }

    /*
     * Do all the call setup and cleanup.
     */
    off = iemNativeEmitCallCommon(pReNative, off, cArgs, 0 /*cHiddenArgs*/);

    /*
     * Make the call and update the return code variable if we've got one.
     */
    off = iemNativeEmitCallImm(pReNative, off, pfnAImpl);
    if (idxVarRc != UINT8_MAX)
        iemNativeVarRegisterSet(pReNative, idxVarRc, IEMNATIVE_CALL_RET_GREG, off);

    return off;
}



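/*
 * Note: unlike the CImpl path there are no hidden arguments here, and the
 * optional return value is simply the host calling convention's return
 * register (IEMNATIVE_CALL_RET_GREG), which iemNativeVarRegisterSet() binds
 * to the result variable after the call.
 */
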
#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) \
    off = iemNativeEmitCallAImpl0(pReNative, off, UINT8_MAX /*idxVarRc*/, (uintptr_t)(a_pfn))

#define IEM_MC_CALL_AIMPL_0(a_rc, a_pfn) \
    off = iemNativeEmitCallAImpl0(pReNative, off, a_rc, (uintptr_t)(a_pfn))

/** Emits code for IEM_MC_CALL_VOID_AIMPL_0 and IEM_MC_CALL_AIMPL_0. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallAImpl0(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRc, uintptr_t pfnAImpl)
{
    return iemNativeEmitCallAImplCommon(pReNative, off, idxVarRc, pfnAImpl, 0);
}


#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) \
    off = iemNativeEmitCallAImpl1(pReNative, off, UINT8_MAX /*idxVarRc*/, (uintptr_t)(a_pfn), a0)

#define IEM_MC_CALL_AIMPL_1(a_rc, a_pfn, a0) \
    off = iemNativeEmitCallAImpl1(pReNative, off, a_rc, (uintptr_t)(a_pfn), a0)

/** Emits code for IEM_MC_CALL_VOID_AIMPL_1 and IEM_MC_CALL_AIMPL_1. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallAImpl1(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRc, uintptr_t pfnAImpl, uint8_t idxArg0)
{
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg0, 0);
    return iemNativeEmitCallAImplCommon(pReNative, off, idxVarRc, pfnAImpl, 1);
}


#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) \
    off = iemNativeEmitCallAImpl2(pReNative, off, UINT8_MAX /*idxVarRc*/, (uintptr_t)(a_pfn), a0, a1)

#define IEM_MC_CALL_AIMPL_2(a_rc, a_pfn, a0, a1) \
    off = iemNativeEmitCallAImpl2(pReNative, off, a_rc, (uintptr_t)(a_pfn), a0, a1)

/** Emits code for IEM_MC_CALL_VOID_AIMPL_2 and IEM_MC_CALL_AIMPL_2. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallAImpl2(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRc,
                        uintptr_t pfnAImpl, uint8_t idxArg0, uint8_t idxArg1)
{
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg0, 0);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg1, 1);
    return iemNativeEmitCallAImplCommon(pReNative, off, idxVarRc, pfnAImpl, 2);
}


#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) \
    off = iemNativeEmitCallAImpl3(pReNative, off, UINT8_MAX /*idxVarRc*/, (uintptr_t)(a_pfn), a0, a1, a2)

#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) \
    off = iemNativeEmitCallAImpl3(pReNative, off, a_rc, (uintptr_t)(a_pfn), a0, a1, a2)

/** Emits code for IEM_MC_CALL_VOID_AIMPL_3 and IEM_MC_CALL_AIMPL_3. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallAImpl3(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRc,
                        uintptr_t pfnAImpl, uint8_t idxArg0, uint8_t idxArg1, uint8_t idxArg2)
{
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg0, 0);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg1, 1);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg2, 2);
    return iemNativeEmitCallAImplCommon(pReNative, off, idxVarRc, pfnAImpl, 3);
}


#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) \
    off = iemNativeEmitCallAImpl4(pReNative, off, UINT8_MAX /*idxVarRc*/, (uintptr_t)(a_pfn), a0, a1, a2, a3)

#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) \
    off = iemNativeEmitCallAImpl4(pReNative, off, a_rc, (uintptr_t)(a_pfn), a0, a1, a2, a3)

/** Emits code for IEM_MC_CALL_VOID_AIMPL_4 and IEM_MC_CALL_AIMPL_4. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCallAImpl4(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRc,
                        uintptr_t pfnAImpl, uint8_t idxArg0, uint8_t idxArg1, uint8_t idxArg2, uint8_t idxArg3)
{
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg0, 0);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg1, 1);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg2, 2);
    IEMNATIVE_ASSERT_ARG_VAR_IDX(pReNative, idxArg3, 3);
    return iemNativeEmitCallAImplCommon(pReNative, off, idxVarRc, pfnAImpl, 4);
}



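/*
 * Illustration (hypothetical invocation): an MC block statement like
 *
 *      IEM_MC_CALL_AIMPL_2(rcLocal, iemAImpl_SomeHelper, pu32Dst, u32Src);
 *
 * (iemAImpl_SomeHelper being a made-up assembly helper name) becomes
 *
 *      off = iemNativeEmitCallAImpl2(pReNative, off, rcLocal, (uintptr_t)(iemAImpl_SomeHelper), pu32Dst, u32Src);
 *
 * with the two argument variables required to carry argument numbers 0 and 1
 * (asserted above) and rcLocal receiving the helper's return value.
 */
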
/*********************************************************************************************************************************
*   Emitters for general purpose register fetches (IEM_MC_FETCH_GREG_XXX)                                                        *
*********************************************************************************************************************************/

#define IEM_MC_FETCH_GREG_U8_THREADED(a_u8Dst, a_iGRegEx) \
    off = iemNativeEmitFetchGregU8(pReNative, off, a_u8Dst, a_iGRegEx, sizeof(uint8_t) /*cbZeroExtended*/)

#define IEM_MC_FETCH_GREG_U8_ZX_U16_THREADED(a_u16Dst, a_iGRegEx) \
    off = iemNativeEmitFetchGregU8(pReNative, off, a_u16Dst, a_iGRegEx, sizeof(uint16_t) /*cbZeroExtended*/)

#define IEM_MC_FETCH_GREG_U8_ZX_U32_THREADED(a_u32Dst, a_iGRegEx) \
    off = iemNativeEmitFetchGregU8(pReNative, off, a_u32Dst, a_iGRegEx, sizeof(uint32_t) /*cbZeroExtended*/)

#define IEM_MC_FETCH_GREG_U8_ZX_U64_THREADED(a_u64Dst, a_iGRegEx) \
    off = iemNativeEmitFetchGregU8(pReNative, off, a_u64Dst, a_iGRegEx, sizeof(uint64_t) /*cbZeroExtended*/)


/** Emits code for IEM_MC_FETCH_GREG_U8_THREADED and
 *  IEM_MC_FETCH_GREG_U8_ZX_U16/32/64_THREADED. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchGregU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iGRegEx, uint8_t cbZeroExtended)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, cbZeroExtended); RT_NOREF(cbZeroExtended);
    Assert(iGRegEx < 20);

    /* Same discussion as in iemNativeEmitFetchGregU16. */
    uint8_t const idxGstFullReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGRegEx & 15),
                                                                  kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    /* The value is zero-extended to the full 64-bit host register width. */
    if (iGRegEx < 16)
        off = iemNativeEmitLoadGprFromGpr8(pReNative, off, idxVarReg, idxGstFullReg);
    else
        off = iemNativeEmitLoadGprFromGpr8Hi(pReNative, off, idxVarReg, idxGstFullReg);

    iemNativeVarRegisterRelease(pReNative, idxDstVar);
    iemNativeRegFreeTmp(pReNative, idxGstFullReg);
    return off;
}
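
/*
 * Note on the iGRegEx encoding used above: values 0..15 address the low byte
 * of GPR 0..15, while 16..19 are taken to address the legacy high byte
 * registers AH, CH, DH and BH (bits 15:8 of RAX/RCX/RDX/RBX), which is why
 * the code masks with iGRegEx & 15 and switches to the Gpr8Hi load for the
 * second range.
 */
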

#define IEM_MC_FETCH_GREG_U8_SX_U16_THREADED(a_u16Dst, a_iGRegEx) \
    off = iemNativeEmitFetchGregU8Sx(pReNative, off, a_u16Dst, a_iGRegEx, sizeof(uint16_t))

#define IEM_MC_FETCH_GREG_U8_SX_U32_THREADED(a_u32Dst, a_iGRegEx) \
    off = iemNativeEmitFetchGregU8Sx(pReNative, off, a_u32Dst, a_iGRegEx, sizeof(uint32_t))

#define IEM_MC_FETCH_GREG_U8_SX_U64_THREADED(a_u64Dst, a_iGRegEx) \
    off = iemNativeEmitFetchGregU8Sx(pReNative, off, a_u64Dst, a_iGRegEx, sizeof(uint64_t))

/** Emits code for IEM_MC_FETCH_GREG_U8_SX_U16/32/64_THREADED. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchGregU8Sx(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iGRegEx, uint8_t cbSignExtended)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, cbSignExtended);
    Assert(iGRegEx < 20);

    /* Same discussion as in iemNativeEmitFetchGregU16. */
    uint8_t const idxGstFullReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGRegEx & 15),
                                                                  kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    if (iGRegEx < 16)
    {
        switch (cbSignExtended)
        {
            case sizeof(uint16_t):
                off = iemNativeEmitLoadGpr16SignExtendedFromGpr8(pReNative, off, idxVarReg, idxGstFullReg);
                break;
            case sizeof(uint32_t):
                off = iemNativeEmitLoadGpr32SignExtendedFromGpr8(pReNative, off, idxVarReg, idxGstFullReg);
                break;
            case sizeof(uint64_t):
                off = iemNativeEmitLoadGprSignExtendedFromGpr8(pReNative, off, idxVarReg, idxGstFullReg);
                break;
            default: AssertFailed(); break;
        }
    }
    else
    {
        off = iemNativeEmitLoadGprFromGpr8Hi(pReNative, off, idxVarReg, idxGstFullReg);
        switch (cbSignExtended)
        {
            case sizeof(uint16_t):
                off = iemNativeEmitLoadGpr16SignExtendedFromGpr8(pReNative, off, idxVarReg, idxVarReg);
                break;
            case sizeof(uint32_t):
                off = iemNativeEmitLoadGpr32SignExtendedFromGpr8(pReNative, off, idxVarReg, idxVarReg);
                break;
            case sizeof(uint64_t):
                off = iemNativeEmitLoadGprSignExtendedFromGpr8(pReNative, off, idxVarReg, idxVarReg);
                break;
            default: AssertFailed(); break;
        }
    }

    iemNativeVarRegisterRelease(pReNative, idxDstVar);
    iemNativeRegFreeTmp(pReNative, idxGstFullReg);
    return off;
}



#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) \
    off = iemNativeEmitFetchGregU16(pReNative, off, a_u16Dst, a_iGReg, sizeof(uint16_t))

#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u16Dst, a_iGReg) \
    off = iemNativeEmitFetchGregU16(pReNative, off, a_u16Dst, a_iGReg, sizeof(uint32_t))

#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u16Dst, a_iGReg) \
    off = iemNativeEmitFetchGregU16(pReNative, off, a_u16Dst, a_iGReg, sizeof(uint64_t))

/** Emits code for IEM_MC_FETCH_GREG_U16 and IEM_MC_FETCH_GREG_U16_ZX_U32/64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchGregU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iGReg, uint8_t cbZeroExtended)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, cbZeroExtended); RT_NOREF(cbZeroExtended);
    Assert(iGReg < 16);

    /*
     * We can either just load the low 16 bits of the GPR into a host register
     * for the variable, or we can do so via a shadow copy host register.  The
     * latter will avoid having to reload it if it's being stored later, but
     * will waste a host register if it isn't touched again.  Since we don't
     * know what's going to happen, we choose the latter for now.
     */
    uint8_t const idxGstFullReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                  kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);
    off = iemNativeEmitLoadGprFromGpr16(pReNative, off, idxVarReg, idxGstFullReg);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    iemNativeRegFreeTmp(pReNative, idxGstFullReg);
    return off;
}


#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u16Dst, a_iGReg) \
    off = iemNativeEmitFetchGregU16Sx(pReNative, off, a_u16Dst, a_iGReg, sizeof(uint32_t))

#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u16Dst, a_iGReg) \
    off = iemNativeEmitFetchGregU16Sx(pReNative, off, a_u16Dst, a_iGReg, sizeof(uint64_t))

/** Emits code for IEM_MC_FETCH_GREG_U16_SX_U32/64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchGregU16Sx(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iGReg, uint8_t cbSignExtended)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, cbSignExtended);
    Assert(iGReg < 16);

    /*
     * We can either just load the low 16 bits of the GPR into a host register
     * for the variable, or we can do so via a shadow copy host register.  The
     * latter will avoid having to reload it if it's being stored later, but
     * will waste a host register if it isn't touched again.  Since we don't
     * know what's going to happen, we choose the latter for now.
     */
    uint8_t const idxGstFullReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                  kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);
    if (cbSignExtended == sizeof(uint32_t))
        off = iemNativeEmitLoadGpr32SignExtendedFromGpr16(pReNative, off, idxVarReg, idxGstFullReg);
    else
    {
        Assert(cbSignExtended == sizeof(uint64_t));
        off = iemNativeEmitLoadGprSignExtendedFromGpr16(pReNative, off, idxVarReg, idxGstFullReg);
    }
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    iemNativeRegFreeTmp(pReNative, idxGstFullReg);
    return off;
}


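/*
 * Note: iemNativeEmitLoadGprFromGpr16 is expected to zero the destination's
 * upper bits (e.g. a movzx style load on AMD64, uxth on ARM64), which is why
 * the U16 and U16_ZX_U32/U64 variants can share the emitter above and
 * cbZeroExtended only participates in the strict size assertion.
 */
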
#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) \
    off = iemNativeEmitFetchGregU32(pReNative, off, a_u32Dst, a_iGReg, sizeof(uint32_t))

#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u32Dst, a_iGReg) \
    off = iemNativeEmitFetchGregU32(pReNative, off, a_u32Dst, a_iGReg, sizeof(uint64_t))

/** Emits code for IEM_MC_FETCH_GREG_U32 and IEM_MC_FETCH_GREG_U32_ZX_U64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchGregU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iGReg, uint8_t cbZeroExtended)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, cbZeroExtended); RT_NOREF(cbZeroExtended);
    Assert(iGReg < 16);

    /*
     * We can either just load the low 32 bits of the GPR into a host register
     * for the variable, or we can do so via a shadow copy host register.  The
     * latter will avoid having to reload it if it's being stored later, but
     * will waste a host register if it isn't touched again.  Since we don't
     * know what's going to happen, we choose the latter for now.
     */
    uint8_t const idxGstFullReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                  kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);
    off = iemNativeEmitLoadGprFromGpr32(pReNative, off, idxVarReg, idxGstFullReg);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    iemNativeRegFreeTmp(pReNative, idxGstFullReg);
    return off;
}


#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u32Dst, a_iGReg) \
    off = iemNativeEmitFetchGregU32SxU64(pReNative, off, a_u32Dst, a_iGReg)

/** Emits code for IEM_MC_FETCH_GREG_U32_SX_U64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchGregU32SxU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iGReg)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint64_t));
    Assert(iGReg < 16);

    /*
     * We can either just load the low 32 bits of the GPR into a host register
     * for the variable, or we can do so via a shadow copy host register.  The
     * latter will avoid having to reload it if it's being stored later, but
     * will waste a host register if it isn't touched again.  Since we don't
     * know what's going to happen, we choose the latter for now.
     */
    uint8_t const idxGstFullReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                  kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);
    off = iemNativeEmitLoadGprSignExtendedFromGpr32(pReNative, off, idxVarReg, idxGstFullReg);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    iemNativeRegFreeTmp(pReNative, idxGstFullReg);
    return off;
}


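/*
 * Note: the sign-extending 32-bit load above is typically a single host
 * instruction (movsxd on AMD64, sxtw on ARM64), so no separate load plus
 * extend step is needed.
 */
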
#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) \
    off = iemNativeEmitFetchGregU64(pReNative, off, a_u64Dst, a_iGReg)

#define IEM_MC_FETCH_GREG_U64_ZX_U64(a_u64Dst, a_iGReg) \
    off = iemNativeEmitFetchGregU64(pReNative, off, a_u64Dst, a_iGReg)

/** Emits code for IEM_MC_FETCH_GREG_U64 (and the
 *  IEM_MC_FETCH_GREG_U64_ZX_U64 alias). */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchGregU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iGReg)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint64_t));
    Assert(iGReg < 16);

    uint8_t const idxGstFullReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                  kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxVarReg, idxGstFullReg);
    /** @todo name the register a shadow one already? */
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    iemNativeRegFreeTmp(pReNative, idxGstFullReg);
    return off;
}



/*********************************************************************************************************************************
*   Emitters for general purpose register stores (IEM_MC_STORE_GREG_XXX)                                                         *
*********************************************************************************************************************************/

#define IEM_MC_STORE_GREG_U8_CONST_THREADED(a_iGRegEx, a_u8Value) \
    off = iemNativeEmitStoreGregU8Const(pReNative, off, a_iGRegEx, a_u8Value)

/** Emits code for IEM_MC_STORE_GREG_U8_CONST_THREADED. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGregU8Const(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGRegEx, uint8_t u8Value)
{
    Assert(iGRegEx < 20);
    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGRegEx & 15),
                                                                 kIemNativeGstRegUse_ForUpdate);
#ifdef RT_ARCH_AMD64
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 12);

    /* To the lowest byte of the register: mov r8, imm8 */
    if (iGRegEx < 16)
    {
        if (idxGstTmpReg >= 8)
            pbCodeBuf[off++] = X86_OP_REX_B;
        else if (idxGstTmpReg >= 4)
            pbCodeBuf[off++] = X86_OP_REX;
        pbCodeBuf[off++] = 0xb0 + (idxGstTmpReg & 7);
        pbCodeBuf[off++] = u8Value;
    }
    /* Otherwise it's to ah, ch, dh or bh: use mov r8, imm8 if we can; otherwise we rotate. */
    else if (idxGstTmpReg < 4)
    {
        pbCodeBuf[off++] = 0xb4 + idxGstTmpReg;
        pbCodeBuf[off++] = u8Value;
    }
    else
    {
        /* ror reg64, 8 */
        pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg < 8 ? 0 : X86_OP_REX_B);
        pbCodeBuf[off++] = 0xc1;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, idxGstTmpReg & 7);
        pbCodeBuf[off++] = 8;

        /* mov reg8, imm8 */
        if (idxGstTmpReg >= 8)
            pbCodeBuf[off++] = X86_OP_REX_B;
        else if (idxGstTmpReg >= 4)
            pbCodeBuf[off++] = X86_OP_REX;
        pbCodeBuf[off++] = 0xb0 + (idxGstTmpReg & 7);
        pbCodeBuf[off++] = u8Value;

        /* rol reg64, 8 */
        pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg < 8 ? 0 : X86_OP_REX_B);
        pbCodeBuf[off++] = 0xc1;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxGstTmpReg & 7);
        pbCodeBuf[off++] = 8;
    }

#elif defined(RT_ARCH_ARM64)
    uint8_t const    idxImmReg   = iemNativeRegAllocTmpImm(pReNative, &off, u8Value);
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
    if (iGRegEx < 16)
        /* bfi w1, w2, 0, 8 - moves bits 7:0 from idxImmReg to idxGstTmpReg bits 7:0. */
        pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxImmReg, 0, 8);
    else
        /* bfi w1, w2, 8, 8 - moves bits 7:0 from idxImmReg to idxGstTmpReg bits 15:8. */
        pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxImmReg, 8, 8);
    iemNativeRegFreeTmp(pReNative, idxImmReg);

#else
# error "Port me!"
#endif

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGRegEx & 15]));

    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    return off;
}


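/*
 * Illustration: for a high host register, say r10, the rotate trick above
 * emits the sequence
 *
 *      ror r10, 8      ; bring bits 15:8 down to 7:0
 *      mov r10b, imm8  ; overwrite the now-low byte
 *      rol r10, 8      ; restore the original bit layout
 *
 * which avoids needing a scratch register for the AH/CH/DH/BH case.
 */
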
#define IEM_MC_STORE_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
    off = iemNativeEmitStoreGregU8(pReNative, off, a_iGRegEx, a_u8Value)

/** Emits code for IEM_MC_STORE_GREG_U8_THREADED. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGregU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGRegEx, uint8_t idxValueVar)
{
    Assert(iGRegEx < 20);
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxValueVar);

    /*
     * If it's a constant value (unlikely) we treat this as an
     * IEM_MC_STORE_GREG_U8_CONST statement.
     */
    PIEMNATIVEVAR const pValueVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxValueVar)];
    if (pValueVar->enmKind == kIemNativeVarKind_Stack)
    { /* likely */ }
    else
    {
        AssertStmt(pValueVar->enmKind == kIemNativeVarKind_Immediate,
                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
        return iemNativeEmitStoreGregU8Const(pReNative, off, iGRegEx, (uint8_t)pValueVar->u.uValue);
    }

    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGRegEx & 15),
                                                                 kIemNativeGstRegUse_ForUpdate);
    uint8_t const idxVarReg    = iemNativeVarRegisterAcquire(pReNative, idxValueVar, &off, true /*fInitialized*/);

#ifdef RT_ARCH_AMD64
    /* To the lowest byte of the register: mov reg8, reg8(r/m) */
    if (iGRegEx < 16)
    {
        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
        if (idxGstTmpReg >= 8 || idxVarReg >= 8)
            pbCodeBuf[off++] = (idxGstTmpReg >= 8 ? X86_OP_REX_R : 0) | (idxVarReg >= 8 ? X86_OP_REX_B : 0);
        else if (idxGstTmpReg >= 4 || idxVarReg >= 4)
            pbCodeBuf[off++] = X86_OP_REX;
        pbCodeBuf[off++] = 0x8a;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxGstTmpReg & 7, idxVarReg & 7);
    }
    /* Otherwise it's to ah, ch, dh or bh from al, cl, dl or bl: use mov r8, r8 if we can; otherwise we rotate. */
    else if (idxGstTmpReg < 4 && idxVarReg < 4)
    {
        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2+1);
        pbCodeBuf[off++] = 0x8a;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxGstTmpReg + 4, idxVarReg);
    }
    else
    {
        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 15);

        /* ror reg64, 8 */
        pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg < 8 ? 0 : X86_OP_REX_B);
        pbCodeBuf[off++] = 0xc1;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, idxGstTmpReg & 7);
        pbCodeBuf[off++] = 8;

        /* mov reg8, reg8(r/m) */
        if (idxGstTmpReg >= 8 || idxVarReg >= 8)
            pbCodeBuf[off++] = (idxGstTmpReg >= 8 ? X86_OP_REX_R : 0) | (idxVarReg >= 8 ? X86_OP_REX_B : 0);
        else if (idxGstTmpReg >= 4 || idxVarReg >= 4)
            pbCodeBuf[off++] = X86_OP_REX;
        pbCodeBuf[off++] = 0x8a;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxGstTmpReg & 7, idxVarReg & 7);

        /* rol reg64, 8 */
        pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg < 8 ? 0 : X86_OP_REX_B);
        pbCodeBuf[off++] = 0xc1;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxGstTmpReg & 7);
        pbCodeBuf[off++] = 8;
    }

#elif defined(RT_ARCH_ARM64)
    /* bfi w1, w2, 0, 8 - moves bits 7:0 from idxVarReg to idxGstTmpReg bits 7:0.
       or
       bfi w1, w2, 8, 8 - moves bits 7:0 from idxVarReg to idxGstTmpReg bits 15:8. */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    if (iGRegEx < 16)
        pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxVarReg, 0, 8);
    else
        pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxVarReg, 8, 8);

#else
# error "Port me!"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    iemNativeVarRegisterRelease(pReNative, idxValueVar);

    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGRegEx & 15]));
    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    return off;
}



#define IEM_MC_STORE_GREG_U16_CONST(a_iGReg, a_u16Const) \
    off = iemNativeEmitStoreGregU16Const(pReNative, off, a_iGReg, a_u16Const)

/** Emits code for IEM_MC_STORE_GREG_U16_CONST. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGregU16Const(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint16_t uValue)
{
    Assert(iGReg < 16);
    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                 kIemNativeGstRegUse_ForUpdate);
#ifdef RT_ARCH_AMD64
    /* mov reg16, imm16 */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    if (idxGstTmpReg >= 8)
        pbCodeBuf[off++] = X86_OP_REX_B;
    pbCodeBuf[off++] = 0xb8 + (idxGstTmpReg & 7);
    pbCodeBuf[off++] = RT_BYTE1(uValue);
    pbCodeBuf[off++] = RT_BYTE2(uValue);

#elif defined(RT_ARCH_ARM64)
    /* movk xdst, #uValue, lsl #0 */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = Armv8A64MkInstrMovK(idxGstTmpReg, uValue);

#else
# error "Port me!"
#endif

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    return off;
}


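/*
 * Note: both the 16-bit mov on AMD64 and movk on ARM64 only replace bits
 * 15:0 and preserve the rest of the register, which is why the guest
 * register is allocated ForUpdate (read-modify-write) rather than
 * ForFullWrite here.
 */
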
#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) \
    off = iemNativeEmitStoreGregU16(pReNative, off, a_iGReg, a_u16Value)

/** Emits code for IEM_MC_STORE_GREG_U16. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGregU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t idxValueVar)
{
    Assert(iGReg < 16);
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxValueVar);

    /*
     * If it's a constant value (unlikely) we treat this as an
     * IEM_MC_STORE_GREG_U16_CONST statement.
     */
    PIEMNATIVEVAR const pValueVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxValueVar)];
    if (pValueVar->enmKind == kIemNativeVarKind_Stack)
    { /* likely */ }
    else
    {
        AssertStmt(pValueVar->enmKind == kIemNativeVarKind_Immediate,
                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
        return iemNativeEmitStoreGregU16Const(pReNative, off, iGReg, (uint16_t)pValueVar->u.uValue);
    }

    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                 kIemNativeGstRegUse_ForUpdate);

#ifdef RT_ARCH_AMD64
    /* mov reg16, reg16 or [mem16] */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 12);
    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    if (pValueVar->idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
    {
        if (idxGstTmpReg >= 8 || pValueVar->idxReg >= 8)
            pbCodeBuf[off++] = (idxGstTmpReg >= 8      ? X86_OP_REX_R : 0)
                             | (pValueVar->idxReg >= 8 ? X86_OP_REX_B : 0);
        pbCodeBuf[off++] = 0x8b;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxGstTmpReg & 7, pValueVar->idxReg & 7);
    }
    else
    {
        uint8_t const idxStackSlot = pValueVar->idxStackSlot;
        AssertStmt(idxStackSlot != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
        if (idxGstTmpReg >= 8)
            pbCodeBuf[off++] = X86_OP_REX_R;
        pbCodeBuf[off++] = 0x8b;
        off = iemNativeEmitGprByBpDisp(pbCodeBuf, off, idxGstTmpReg, iemNativeStackCalcBpDisp(idxStackSlot), pReNative);
    }

#elif defined(RT_ARCH_ARM64)
    /* bfi w1, w2, 0, 16 - moves bits 15:0 from idxVarReg to idxGstTmpReg bits 15:0. */
    uint8_t const    idxVarReg   = iemNativeVarRegisterAcquire(pReNative, idxValueVar, &off, true /*fInitialized*/);
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxVarReg, 0, 16);
    iemNativeVarRegisterRelease(pReNative, idxValueVar);

#else
# error "Port me!"
#endif

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    return off;
}


#define IEM_MC_STORE_GREG_U32_CONST(a_iGReg, a_u32Const) \
    off = iemNativeEmitStoreGregU32Const(pReNative, off, a_iGReg, a_u32Const)

/** Emits code for IEM_MC_STORE_GREG_U32_CONST. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGregU32Const(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint32_t uValue)
{
    Assert(iGReg < 16);
    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                 kIemNativeGstRegUse_ForFullWrite);
    off = iemNativeEmitLoadGprImm64(pReNative, off, idxGstTmpReg, uValue);
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    return off;
}


#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) \
    off = iemNativeEmitStoreGregU32(pReNative, off, a_iGReg, a_u32Value)

/** Emits code for IEM_MC_STORE_GREG_U32. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGregU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t idxValueVar)
{
    Assert(iGReg < 16);
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxValueVar);

    /*
     * If it's a constant value (unlikely) we treat this as an
     * IEM_MC_STORE_GREG_U32_CONST statement.
     */
    PIEMNATIVEVAR const pValueVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxValueVar)];
    if (pValueVar->enmKind == kIemNativeVarKind_Stack)
    { /* likely */ }
    else
    {
        AssertStmt(pValueVar->enmKind == kIemNativeVarKind_Immediate,
                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
        return iemNativeEmitStoreGregU32Const(pReNative, off, iGReg, (uint32_t)pValueVar->u.uValue);
    }

    /*
     * For the rest we allocate a guest register for the variable and write
     * it to the CPUMCTX structure.
     */
    uint8_t const idxVarReg = iemNativeVarRegisterAcquireForGuestReg(pReNative, idxValueVar, IEMNATIVEGSTREG_GPR(iGReg), &off);
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxVarReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
#ifdef VBOX_STRICT
    off = iemNativeEmitTop32BitsClearCheck(pReNative, off, idxVarReg);
#endif
    iemNativeVarRegisterRelease(pReNative, idxValueVar);
    return off;
}


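/*
 * Note: a 32-bit GPR write must clear bits 63:32 of the guest register.  The
 * variable's host register is expected to already be zero-extended at this
 * point, so the full 64-bit store above suffices; the VBOX_STRICT check
 * verifies that the top 32 bits are in fact clear.
 */
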
#define IEM_MC_STORE_GREG_U64_CONST(a_iGReg, a_u64Const) \
    off = iemNativeEmitStoreGregU64Const(pReNative, off, a_iGReg, a_u64Const)

/** Emits code for IEM_MC_STORE_GREG_U64_CONST. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGregU64Const(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint64_t uValue)
{
    Assert(iGReg < 16);
    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                 kIemNativeGstRegUse_ForFullWrite);
    off = iemNativeEmitLoadGprImm64(pReNative, off, idxGstTmpReg, uValue);
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    return off;
}


#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) \
    off = iemNativeEmitStoreGregU64(pReNative, off, a_iGReg, a_u64Value)

/** Emits code for IEM_MC_STORE_GREG_U64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGregU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t idxValueVar)
{
    Assert(iGReg < 16);
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxValueVar);

    /*
     * If it's a constant value (unlikely) we treat this as an
     * IEM_MC_STORE_GREG_U64_CONST statement.
     */
    PIEMNATIVEVAR const pValueVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxValueVar)];
    if (pValueVar->enmKind == kIemNativeVarKind_Stack)
    { /* likely */ }
    else
    {
        AssertStmt(pValueVar->enmKind == kIemNativeVarKind_Immediate,
                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
        return iemNativeEmitStoreGregU64Const(pReNative, off, iGReg, pValueVar->u.uValue);
    }

    /*
     * For the rest we allocate a guest register for the variable and write
     * it to the CPUMCTX structure.
     */
    uint8_t const idxVarReg = iemNativeVarRegisterAcquireForGuestReg(pReNative, idxValueVar, IEMNATIVEGSTREG_GPR(iGReg), &off);
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxVarReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
    iemNativeVarRegisterRelease(pReNative, idxValueVar);
    return off;
}


#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) \
    off = iemNativeEmitClearHighGregU64(pReNative, off, a_iGReg)

/** Emits code for IEM_MC_CLEAR_HIGH_GREG_U64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitClearHighGregU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg)
{
    Assert(iGReg < 16);
    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                 kIemNativeGstRegUse_ForUpdate);
    off = iemNativeEmitLoadGprFromGpr32(pReNative, off, idxGstTmpReg, idxGstTmpReg);
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    return off;
}


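/*
 * Note: the 32-bit self move above relies on both hosts zero-extending
 * 32-bit register writes (mov reg32, reg32 on AMD64, mov wN, wM on ARM64),
 * which is exactly the required clearing of bits 63:32.
 */
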
/*********************************************************************************************************************************
*   General purpose register manipulation (add, sub)                                                                             *
*********************************************************************************************************************************/

#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u8Const) \
    off = iemNativeEmitAddGregU16(pReNative, off, a_iGReg, a_u8Const)

/** Emits code for IEM_MC_ADD_GREG_U16. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitAddGregU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t uAddend)
{
    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                 kIemNativeGstRegUse_ForUpdate);

#ifdef RT_ARCH_AMD64
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    if (idxGstTmpReg >= 8)
        pbCodeBuf[off++] = X86_OP_REX_B;
    if (uAddend == 1)
    {
        pbCodeBuf[off++] = 0xff; /* inc */
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxGstTmpReg & 7);
    }
    else
    {
        pbCodeBuf[off++] = 0x81; /* add */
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxGstTmpReg & 7);
        pbCodeBuf[off++] = uAddend;
        pbCodeBuf[off++] = 0;
    }

#else
    uint8_t const    idxTmpReg   = iemNativeRegAllocTmp(pReNative, &off);
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);

    /* add tmp, gstgrp, uAddend */
    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, idxTmpReg, idxGstTmpReg, uAddend, false /*f64Bit*/);

    /* bfi w1, w2, 0, 16 - copies bits 15:0 from the temporary into the guest register. */
    pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxTmpReg, 0, 16);

    iemNativeRegFreeTmp(pReNative, idxTmpReg);
#endif

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));

    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    return off;
}


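/*
 * Note: the 16-bit add must preserve bits 63:16 of the guest register.  The
 * ARM64 path therefore adds into a temporary and uses bfi to insert only
 * bits 15:0 back, while the AMD64 path relies on the operand size prefixed
 * inc/add leaving the upper bits untouched by itself.
 */
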
#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u8Const) \
    off = iemNativeEmitAddGregU32U64(pReNative, off, a_iGReg, a_u8Const, false /*f64Bit*/)

#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u8Const) \
    off = iemNativeEmitAddGregU32U64(pReNative, off, a_iGReg, a_u8Const, true /*f64Bit*/)

/** Emits code for IEM_MC_ADD_GREG_U32 and IEM_MC_ADD_GREG_U64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitAddGregU32U64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t uAddend, bool f64Bit)
{
    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                 kIemNativeGstRegUse_ForUpdate);

#ifdef RT_ARCH_AMD64
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (f64Bit)
        pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg >= 8 ? X86_OP_REX_B : 0);
    else if (idxGstTmpReg >= 8)
        pbCodeBuf[off++] = X86_OP_REX_B;
    if (uAddend == 1)
    {
        pbCodeBuf[off++] = 0xff; /* inc */
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxGstTmpReg & 7);
    }
    else if (uAddend < 128)
    {
        pbCodeBuf[off++] = 0x83; /* add */
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxGstTmpReg & 7);
        pbCodeBuf[off++] = RT_BYTE1(uAddend);
    }
    else
    {
        pbCodeBuf[off++] = 0x81; /* add */
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxGstTmpReg & 7);
        pbCodeBuf[off++] = RT_BYTE1(uAddend);
        pbCodeBuf[off++] = 0;
        pbCodeBuf[off++] = 0;
        pbCodeBuf[off++] = 0;
    }

#else
    /* add gstgrp, gstgrp, uAddend */
    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, idxGstTmpReg, idxGstTmpReg, uAddend, f64Bit);

#endif

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));

    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    return off;
}



3142 | #define IEM_MC_SUB_GREG_U16(a_iGReg, a_u8SubtrahendConst) \
|
---|
3143 | off = iemNativeEmitSubGregU16(pReNative, off, a_iGReg, a_u8SubtrahendConst)
|
---|
3144 |
|
---|
3145 | /** Emits code for IEM_MC_SUB_GREG_U16. */
|
---|
3146 | DECL_INLINE_THROW(uint32_t)
|
---|
3147 | iemNativeEmitSubGregU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t uSubtrahend)
|
---|
3148 | {
|
---|
3149 | uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
|
---|
3150 | kIemNativeGstRegUse_ForUpdate);
|
---|
3151 |
|
---|
3152 | #ifdef RT_ARCH_AMD64
|
---|
3153 | uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
|
---|
3154 | pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
|
---|
3155 | if (idxGstTmpReg >= 8)
|
---|
3156 | pbCodeBuf[off++] = X86_OP_REX_B;
|
---|
3157 | if (uSubtrahend == 1)
|
---|
3158 | {
|
---|
3159 | pbCodeBuf[off++] = 0xff; /* dec */
|
---|
3160 | pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, idxGstTmpReg & 7);
|
---|
3161 | }
|
---|
3162 | else
|
---|
3163 | {
|
---|
3164 | pbCodeBuf[off++] = 0x81;
|
---|
3165 | pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, idxGstTmpReg & 7);
|
---|
3166 | pbCodeBuf[off++] = uSubtrahend;
|
---|
3167 | pbCodeBuf[off++] = 0;
|
---|
3168 | }
|
---|
3169 |
|
---|
3170 | #else
|
---|
3171 | uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
|
---|
3172 | uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
|
---|
3173 |
|
---|
3174 | /* sub tmp, gstgrp, uSubtrahend */
|
---|
3175 | pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, idxTmpReg, idxGstTmpReg, uSubtrahend, false /*f64Bit*/);
|
---|
3176 |
|
---|
3177 | /* bfi w1, w2, 0, 16 - moves bits 15:0 from tmpreg2 to tmpreg. */
|
---|
3178 | pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxTmpReg, 0, 16);
|
---|
3179 |
|
---|
3180 | iemNativeRegFreeTmp(pReNative, idxTmpReg);
|
---|
3181 | #endif
|
---|
3182 |
|
---|
3183 | IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
|
---|
3184 |
|
---|
3185 | off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
|
---|
3186 |
|
---|
3187 | iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
|
---|
3188 | return off;
|
---|
3189 | }
|
---|
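
/*
 * Reference encodings for the 16-bit subtraction above (a sketch; host reg
 * si assumed): the 0x66 operand size prefix shrinks the immediate to 16 bits:
 *      uSubtrahend == 1: 66 FF CE       - dec si
 *      uSubtrahend == 5: 66 81 EE 05 00 - sub si, 5
 * ARM64 has no 16-bit sub, so the result goes via a temporary and BFI merges
 * the low 16 bits back, preserving bits 63:16 of the guest register as the
 * x86 semantics require.
 */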

#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u8Const) \
    off = iemNativeEmitSubGregU32U64(pReNative, off, a_iGReg, a_u8Const, false /*f64Bit*/)

#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u8Const) \
    off = iemNativeEmitSubGregU32U64(pReNative, off, a_iGReg, a_u8Const, true /*f64Bit*/)

/** Emits code for IEM_MC_SUB_GREG_U32 and IEM_MC_SUB_GREG_U64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSubGregU32U64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t uSubtrahend, bool f64Bit)
{
    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
                                                                 kIemNativeGstRegUse_ForUpdate);

#ifdef RT_ARCH_AMD64
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (f64Bit)
        pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg >= 8 ? X86_OP_REX_B : 0);
    else if (idxGstTmpReg >= 8)
        pbCodeBuf[off++] = X86_OP_REX_B;
    if (uSubtrahend == 1)
    {
        pbCodeBuf[off++] = 0xff; /* dec */
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, idxGstTmpReg & 7);
    }
    else if (uSubtrahend < 128)
    {
        pbCodeBuf[off++] = 0x83; /* sub */
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, idxGstTmpReg & 7);
        pbCodeBuf[off++] = RT_BYTE1(uSubtrahend);
    }
    else
    {
        pbCodeBuf[off++] = 0x81; /* sub */
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, idxGstTmpReg & 7);
        pbCodeBuf[off++] = RT_BYTE1(uSubtrahend);
        pbCodeBuf[off++] = 0;
        pbCodeBuf[off++] = 0;
        pbCodeBuf[off++] = 0;
    }

#else
    /* sub tmp, gstgrp, uSubtrahend */
    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, idxGstTmpReg, idxGstTmpReg, uSubtrahend, f64Bit);

#endif

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));

    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    return off;
}
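
/*
 * Note on the !f64Bit case above: a 32-bit ALU operation on AMD64 already
 * zero-extends the result into bits 63:32 of the host register, and the
 * f64Bit=false SUB does the same on ARM64, so no separate clearing of the
 * upper half is needed - which matches the x86 semantics of 32-bit GPR
 * writes.
 */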


/*********************************************************************************************************************************
*   Local variable manipulation (add, sub, and, or).                                                                            *
*********************************************************************************************************************************/

#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) \
    off = iemNativeEmitAndLocal(pReNative, off, a_u8Local, a_u8Mask, sizeof(uint8_t))

#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) \
    off = iemNativeEmitAndLocal(pReNative, off, a_u16Local, a_u16Mask, sizeof(uint16_t))

#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) \
    off = iemNativeEmitAndLocal(pReNative, off, a_u32Local, a_u32Mask, sizeof(uint32_t))

#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) \
    off = iemNativeEmitAndLocal(pReNative, off, a_u64Local, a_u64Mask, sizeof(uint64_t))

/** Emits code for AND'ing a local and a constant value. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitAndLocal(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar, uint64_t uMask, uint8_t cbMask)
{
#ifdef VBOX_STRICT
    switch (cbMask)
    {
        case sizeof(uint8_t):  Assert((uint8_t)uMask  == uMask); break;
        case sizeof(uint16_t): Assert((uint16_t)uMask == uMask); break;
        case sizeof(uint32_t): Assert((uint32_t)uMask == uMask); break;
        case sizeof(uint64_t): break;
        default: AssertFailedBreak();
    }
#endif

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxVar, &off, true /*fInitialized*/);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVar, cbMask);

    if (cbMask <= sizeof(uint32_t))
        off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxVarReg, uMask);
    else
        off = iemNativeEmitAndGprByImm(pReNative, off, idxVarReg, uMask);

    iemNativeVarRegisterRelease(pReNative, idxVar);
    return off;
}


#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) \
    off = iemNativeEmitOrLocal(pReNative, off, a_u8Local, a_u8Mask, sizeof(uint8_t))

#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) \
    off = iemNativeEmitOrLocal(pReNative, off, a_u16Local, a_u16Mask, sizeof(uint16_t))

#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) \
    off = iemNativeEmitOrLocal(pReNative, off, a_u32Local, a_u32Mask, sizeof(uint32_t))

#define IEM_MC_OR_LOCAL_U64(a_u64Local, a_u64Mask) \
    off = iemNativeEmitOrLocal(pReNative, off, a_u64Local, a_u64Mask, sizeof(uint64_t))

/** Emits code for OR'ing a local and a constant value. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitOrLocal(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar, uint64_t uMask, uint8_t cbMask)
{
#ifdef VBOX_STRICT
    switch (cbMask)
    {
        case sizeof(uint8_t):  Assert((uint8_t)uMask  == uMask); break;
        case sizeof(uint16_t): Assert((uint16_t)uMask == uMask); break;
        case sizeof(uint32_t): Assert((uint32_t)uMask == uMask); break;
        case sizeof(uint64_t): break;
        default: AssertFailedBreak();
    }
#endif

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxVar, &off, true /*fInitialized*/);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVar, cbMask);

    if (cbMask <= sizeof(uint32_t))
        off = iemNativeEmitOrGpr32ByImm(pReNative, off, idxVarReg, uMask);
    else
        off = iemNativeEmitOrGprByImm(pReNative, off, idxVarReg, uMask);

    iemNativeVarRegisterRelease(pReNative, idxVar);
    return off;
}


#define IEM_MC_BSWAP_LOCAL_U16(a_u16Local) \
    off = iemNativeEmitBswapLocal(pReNative, off, a_u16Local, sizeof(uint16_t))

#define IEM_MC_BSWAP_LOCAL_U32(a_u32Local) \
    off = iemNativeEmitBswapLocal(pReNative, off, a_u32Local, sizeof(uint32_t))

#define IEM_MC_BSWAP_LOCAL_U64(a_u64Local) \
    off = iemNativeEmitBswapLocal(pReNative, off, a_u64Local, sizeof(uint64_t))

/** Emits code for reversing the byte order in a local value. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitBswapLocal(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar, uint8_t cbLocal)
{
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxVar, &off, true /*fInitialized*/);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVar, cbLocal);

    switch (cbLocal)
    {
        case sizeof(uint16_t): off = iemNativeEmitBswapGpr16(pReNative, off, idxVarReg); break;
        case sizeof(uint32_t): off = iemNativeEmitBswapGpr32(pReNative, off, idxVarReg); break;
        case sizeof(uint64_t): off = iemNativeEmitBswapGpr(pReNative, off, idxVarReg);   break;
        default: AssertFailedBreak();
    }

    iemNativeVarRegisterRelease(pReNative, idxVar);
    return off;
}
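
/*
 * Side note (a sketch, not the actual helper implementations): BSWAP on a
 * 16-bit register is undefined on x86, so a 16-bit byte swap is typically
 * done as a rotate instead, e.g.:
 *      66 C1 C0 08     rol ax, 8
 * while the 32/64-bit cases can use the BSWAP instruction directly
 * (0F C8+r, with REX.W for the 64-bit form).
 */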


/*********************************************************************************************************************************
*   EFLAGS                                                                                                                       *
*********************************************************************************************************************************/

#if !defined(VBOX_WITH_STATISTICS) || !defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS)
# define IEMNATIVE_EFLAGS_OPTIMIZATION_STATS(a_fEflInput, a_fEflOutput) ((void)0)
#else
# define IEMNATIVE_EFLAGS_OPTIMIZATION_STATS(a_fEflInput, a_fEflOutput) \
    iemNativeEFlagsOptimizationStats(pReNative, a_fEflInput, a_fEflOutput)

DECLINLINE(void) iemNativeEFlagsOptimizationStats(PIEMRECOMPILERSTATE pReNative, uint32_t fEflInput, uint32_t fEflOutput)
{
    if (fEflOutput)
    {
        PVMCPUCC const pVCpu = pReNative->pVCpu;
# ifndef IEMLIVENESS_EXTENDED_LAYOUT
        IEMLIVENESSBIT const LivenessBit0 = pReNative->paLivenessEntries[pReNative->idxCurCall].Bit0;
        IEMLIVENESSBIT const LivenessBit1 = pReNative->paLivenessEntries[pReNative->idxCurCall].Bit1;
        AssertCompile(IEMLIVENESS_STATE_CLOBBERED == 0);
# define CHECK_FLAG_AND_UPDATE_STATS(a_fEfl, a_fLivenessMember, a_CoreStatName) \
            if (fEflOutput & (a_fEfl)) \
            { \
                if (LivenessBit0.a_fLivenessMember | LivenessBit1.a_fLivenessMember) \
                    STAM_COUNTER_INC(&pVCpu->iem.s.a_CoreStatName ## Required); \
                else \
                    STAM_COUNTER_INC(&pVCpu->iem.s.a_CoreStatName ## Skippable); \
            } else do { } while (0)
# else
        PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[pReNative->idxCurCall];
        IEMLIVENESSBIT const LivenessClobbered =
        {
              pLivenessEntry->aBits[IEMLIVENESS_BIT_WRITE].bm64
            & ~(  pLivenessEntry->aBits[IEMLIVENESS_BIT_READ].bm64
                | pLivenessEntry->aBits[IEMLIVENESS_BIT_POT_XCPT_OR_CALL].bm64
                | pLivenessEntry->aBits[IEMLIVENESS_BIT_OTHER].bm64)
        };
        IEMLIVENESSBIT const LivenessDelayable =
        {
              pLivenessEntry->aBits[IEMLIVENESS_BIT_WRITE].bm64
            & pLivenessEntry->aBits[IEMLIVENESS_BIT_POT_XCPT_OR_CALL].bm64
            & ~(  pLivenessEntry->aBits[IEMLIVENESS_BIT_READ].bm64
                | pLivenessEntry->aBits[IEMLIVENESS_BIT_OTHER].bm64)
        };
# define CHECK_FLAG_AND_UPDATE_STATS(a_fEfl, a_fLivenessMember, a_CoreStatName) \
            if (fEflOutput & (a_fEfl)) \
            { \
                if (LivenessClobbered.a_fLivenessMember) \
                    STAM_COUNTER_INC(&pVCpu->iem.s.a_CoreStatName ## Skippable); \
                else if (LivenessDelayable.a_fLivenessMember) \
                    STAM_COUNTER_INC(&pVCpu->iem.s.a_CoreStatName ## Delayable); \
                else \
                    STAM_COUNTER_INC(&pVCpu->iem.s.a_CoreStatName ## Required); \
            } else do { } while (0)
# endif
        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_CF, fEflCf, StatNativeLivenessEflCf);
        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_PF, fEflPf, StatNativeLivenessEflPf);
        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_AF, fEflAf, StatNativeLivenessEflAf);
        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_ZF, fEflZf, StatNativeLivenessEflZf);
        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_SF, fEflSf, StatNativeLivenessEflSf);
        CHECK_FLAG_AND_UPDATE_STATS(X86_EFL_OF, fEflOf, StatNativeLivenessEflOf);
        //CHECK_FLAG_AND_UPDATE_STATS(~X86_EFL_STATUS_BITS, fEflOther, StatNativeLivenessEflOther);
# undef CHECK_FLAG_AND_UPDATE_STATS
    }
    RT_NOREF(fEflInput);
}
#endif /* VBOX_WITH_STATISTICS && IEMNATIVE_WITH_LIVENESS_ANALYSIS */

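/*
 * Worked example of the classification done by the macro above: if an 'add'
 * is followed by a 'cmp' that overwrites all six status flags before anything
 * reads them, the liveness info marks the add's EFLAGS output as clobbered
 * and the update counts as skippable; in the extended layout, if the
 * overwriting instruction may raise an exception or call out first, the
 * write is only delayable; otherwise it is required.
 */
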
#undef IEM_MC_FETCH_EFLAGS /* should not be used */
#define IEM_MC_FETCH_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) \
    off = iemNativeEmitFetchEFlags(pReNative, off, a_EFlags, a_fEflInput, a_fEflOutput)

/** Handles IEM_MC_FETCH_EFLAGS_EX. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEFlags,
                         uint32_t fEflInput, uint32_t fEflOutput)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarEFlags);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarEFlags, sizeof(uint32_t));
    RT_NOREF(fEflInput, fEflOutput);

#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
# ifdef VBOX_STRICT
    if (   pReNative->idxCurCall != 0
        && (fEflInput != 0 || fEflOutput != 0) /* for NOT these are both zero for now. */)
    {
        PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[pReNative->idxCurCall - 1];
        uint32_t const           fBoth          = fEflInput | fEflOutput;
# define ASSERT_ONE_EFL(a_fElfConst, a_idxField) \
            AssertMsg(   !(fBoth & (a_fElfConst)) \
                      || (!(fEflInput & (a_fElfConst)) \
                          ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)) \
                          : !(fEflOutput & (a_fElfConst)) \
                          ? IEMLIVENESS_STATE_IS_INPUT_EXPECTED(  iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)) \
                          : IEMLIVENESS_STATE_IS_MODIFY_EXPECTED( iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)) ), \
                      ("%s - %u\n", #a_fElfConst, iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)))
        ASSERT_ONE_EFL(~(uint32_t)X86_EFL_STATUS_BITS, IEMLIVENESSBIT_IDX_EFL_OTHER);
        ASSERT_ONE_EFL(X86_EFL_CF, IEMLIVENESSBIT_IDX_EFL_CF);
        ASSERT_ONE_EFL(X86_EFL_PF, IEMLIVENESSBIT_IDX_EFL_PF);
        ASSERT_ONE_EFL(X86_EFL_AF, IEMLIVENESSBIT_IDX_EFL_AF);
        ASSERT_ONE_EFL(X86_EFL_ZF, IEMLIVENESSBIT_IDX_EFL_ZF);
        ASSERT_ONE_EFL(X86_EFL_SF, IEMLIVENESSBIT_IDX_EFL_SF);
        ASSERT_ONE_EFL(X86_EFL_OF, IEMLIVENESSBIT_IDX_EFL_OF);
# undef ASSERT_ONE_EFL
    }
# endif
#endif

    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fEflInput);

    /** @todo This is suboptimal.  EFLAGS is probably shadowed and we should use
     *        the existing shadow copy. */
    uint8_t const idxReg = iemNativeVarRegisterAcquire(pReNative, idxVarEFlags, &off, false /*fInitialized*/);
    iemNativeRegClearAndMarkAsGstRegShadow(pReNative, idxReg, kIemNativeGstReg_EFlags, off);
    off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, idxReg, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.eflags));
    iemNativeVarRegisterRelease(pReNative, idxVarEFlags);
    return off;
}


/** @todo emit strict build assertions for IEM_MC_COMMIT_EFLAGS_EX when we
 *        start using it with custom native code emission (inlining assembly
 *        instruction helpers). */
#undef IEM_MC_COMMIT_EFLAGS /* should not be used */
#define IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) \
    IEMNATIVE_EFLAGS_OPTIMIZATION_STATS(a_fEflInput, a_fEflOutput); \
    off = iemNativeEmitCommitEFlags(pReNative, off, a_EFlags, a_fEflOutput, true /*fUpdateSkipping*/)

#undef IEM_MC_COMMIT_EFLAGS_OPT /* should not be used */
#define IEM_MC_COMMIT_EFLAGS_OPT_EX(a_EFlags, a_fEflInput, a_fEflOutput) \
    IEMNATIVE_EFLAGS_OPTIMIZATION_STATS(a_fEflInput, a_fEflOutput); \
    off = iemNativeEmitCommitEFlags(pReNative, off, a_EFlags, a_fEflOutput, false /*fUpdateSkipping*/)

/** Handles IEM_MC_COMMIT_EFLAGS_EX. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCommitEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEFlags, uint32_t fEflOutput,
                          bool fUpdateSkipping)
{
    RT_NOREF(fEflOutput);
    uint8_t const idxReg = iemNativeVarRegisterAcquire(pReNative, idxVarEFlags, &off, true /*fInitialized*/);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarEFlags, sizeof(uint32_t));

#ifdef VBOX_STRICT
    off = iemNativeEmitTestAnyBitsInGpr(pReNative, off, idxReg, X86_EFL_RA1_MASK);
    uint32_t offFixup = off;
    off = iemNativeEmitJnzToFixed(pReNative, off, off);
    off = iemNativeEmitBrk(pReNative, off, UINT32_C(0x2001));
    iemNativeFixupFixedJump(pReNative, offFixup, off);

    off = iemNativeEmitTestAnyBitsInGpr(pReNative, off, idxReg, X86_EFL_RAZ_MASK & CPUMX86EFLAGS_HW_MASK_32);
    offFixup = off;
    off = iemNativeEmitJzToFixed(pReNative, off, off);
    off = iemNativeEmitBrk(pReNative, off, UINT32_C(0x2002));
    iemNativeFixupFixedJump(pReNative, offFixup, off);

    /** @todo validate that only bits in the fEflOutput mask changed. */
#endif

#ifdef IEMNATIVE_STRICT_EFLAGS_SKIPPING
    if (fUpdateSkipping)
    {
        if ((fEflOutput & X86_EFL_STATUS_BITS) == X86_EFL_STATUS_BITS)
            off = iemNativeEmitStoreImmToVCpuU32(pReNative, off, 0, RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags));
        else
            off = iemNativeEmitAndImmIntoVCpuU32(pReNative, off, ~(fEflOutput & X86_EFL_STATUS_BITS),
                                                 RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags));
    }
#else
    RT_NOREF_PV(fUpdateSkipping);
#endif

    iemNativeRegClearAndMarkAsGstRegShadow(pReNative, idxReg, kIemNativeGstReg_EFlags, off);
    off = iemNativeEmitStoreGprToVCpuU32(pReNative, off, idxReg, RT_UOFFSETOF_DYN(VMCPUCC, cpum.GstCtx.eflags));
    iemNativeVarRegisterRelease(pReNative, idxVarEFlags);
    return off;
}
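
/*
 * Illustration of the strict checks above: a well-formed EFLAGS value always
 * has bit 1 set (X86_EFL_RA1_MASK) and all reserved-as-zero bits clear.  So,
 * for instance, 0x00000202 passes both tests, 0x00000200 (bit 1 clear) hits
 * the 0x2001 breakpoint, and 0x00008202 (reserved bit 15 set) hits 0x2002.
 */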


/*********************************************************************************************************************************
*   Emitters for segment register fetches (IEM_MC_FETCH_SREG_XXX).                                                              *
*********************************************************************************************************************************/

#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) \
    off = iemNativeEmitFetchSReg(pReNative, off, a_u16Dst, a_iSReg, sizeof(uint16_t))

#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) \
    off = iemNativeEmitFetchSReg(pReNative, off, a_u32Dst, a_iSReg, sizeof(uint32_t))

#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) \
    off = iemNativeEmitFetchSReg(pReNative, off, a_u64Dst, a_iSReg, sizeof(uint64_t))


/** Emits code for IEM_MC_FETCH_SREG_U16, IEM_MC_FETCH_SREG_ZX_U32 and
 *  IEM_MC_FETCH_SREG_ZX_U64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchSReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iSReg, int8_t cbVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, cbVar); RT_NOREF(cbVar);
    Assert(iSReg < X86_SREG_COUNT);

    /*
     * For now, we will not create a shadow copy of a selector.  The rationale
     * is that since we do not recompile the popping and loading of segment
     * registers and that the IEM_MC_FETCH_SREG_U* MCs are only used for
     * pushing and moving to registers, there is only a small chance that the
     * shadow copy will be accessed again before the register is reloaded.  One
     * scenario would be nested calls in 16-bit code, but I doubt it's worth
     * the extra register pressure atm.
     *
     * What we really need first, though, is to combine iemNativeRegAllocTmpForGuestReg
     * and iemNativeVarRegisterAcquire for a load scenario.  We only got the
     * store scenario covered at present (r160730).
     */
    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);
    off = iemNativeEmitLoadGprFromVCpuU16(pReNative, off, idxVarReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aSRegs[iSReg].Sel));
    iemNativeVarRegisterRelease(pReNative, idxDstVar);
    return off;
}


/*********************************************************************************************************************************
*   Register references.                                                                                                        *
*********************************************************************************************************************************/

#define IEM_MC_REF_GREG_U8_THREADED(a_pu8Dst, a_iGRegEx) \
    off = iemNativeEmitRefGregU8(pReNative, off, a_pu8Dst, a_iGRegEx, false /*fConst*/)

#define IEM_MC_REF_GREG_U8_CONST_THREADED(a_pu8Dst, a_iGRegEx) \
    off = iemNativeEmitRefGregU8(pReNative, off, a_pu8Dst, a_iGRegEx, true /*fConst*/)

/** Handles IEM_MC_REF_GREG_U8[_CONST]. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitRefGregU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRef, uint8_t iGRegEx, bool fConst)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarRef);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarRef, sizeof(void *));
    Assert(iGRegEx < 20);

    if (iGRegEx < 16)
        iemNativeVarSetKindToGstRegRef(pReNative, idxVarRef, kIemNativeGstRegRef_Gpr, iGRegEx & 15);
    else
        iemNativeVarSetKindToGstRegRef(pReNative, idxVarRef, kIemNativeGstRegRef_GprHighByte, iGRegEx & 15);

    /* If we've delayed writing back the register value, flush it now. */
    off = iemNativeRegFlushPendingSpecificWrite(pReNative, off, kIemNativeGstRegRef_Gpr, iGRegEx & 15);

    /* If it's not a const reference we need to flush the shadow copy of the register now. */
    if (!fConst)
        iemNativeRegFlushGuestShadows(pReNative, RT_BIT_64(IEMNATIVEGSTREG_GPR(iGRegEx & 15)));

    return off;
}
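
/*
 * On the iGRegEx encoding used above (the convention of the threaded
 * functions): values 0..15 name the regular byte registers (AL, CL, ...,
 * R15L), while 16..19 name the legacy high-byte registers (AH, CH, DH, BH),
 * i.e. bits 15:8 of the first four GPRs - hence the
 * kIemNativeGstRegRef_GprHighByte path.
 */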

#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) \
    off = iemNativeEmitRefGregUxx(pReNative, off, a_pu16Dst, a_iGReg, false /*fConst*/)

#define IEM_MC_REF_GREG_U16_CONST(a_pu16Dst, a_iGReg) \
    off = iemNativeEmitRefGregUxx(pReNative, off, a_pu16Dst, a_iGReg, true /*fConst*/)

#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) \
    off = iemNativeEmitRefGregUxx(pReNative, off, a_pu32Dst, a_iGReg, false /*fConst*/)

#define IEM_MC_REF_GREG_U32_CONST(a_pu32Dst, a_iGReg) \
    off = iemNativeEmitRefGregUxx(pReNative, off, a_pu32Dst, a_iGReg, true /*fConst*/)

#define IEM_MC_REF_GREG_I32(a_pi32Dst, a_iGReg) \
    off = iemNativeEmitRefGregUxx(pReNative, off, a_pi32Dst, a_iGReg, false /*fConst*/)

#define IEM_MC_REF_GREG_I32_CONST(a_pi32Dst, a_iGReg) \
    off = iemNativeEmitRefGregUxx(pReNative, off, a_pi32Dst, a_iGReg, true /*fConst*/)

#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) \
    off = iemNativeEmitRefGregUxx(pReNative, off, a_pu64Dst, a_iGReg, false /*fConst*/)

#define IEM_MC_REF_GREG_U64_CONST(a_pu64Dst, a_iGReg) \
    off = iemNativeEmitRefGregUxx(pReNative, off, a_pu64Dst, a_iGReg, true /*fConst*/)

#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg) \
    off = iemNativeEmitRefGregUxx(pReNative, off, a_pi64Dst, a_iGReg, false /*fConst*/)

#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg) \
    off = iemNativeEmitRefGregUxx(pReNative, off, a_pi64Dst, a_iGReg, true /*fConst*/)

/** Handles IEM_MC_REF_GREG_Uxx[_CONST] and IEM_MC_REF_GREG_Ixx[_CONST]. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitRefGregUxx(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRef, uint8_t iGReg, bool fConst)
{
    Assert(iGReg < 16);
    iemNativeVarSetKindToGstRegRef(pReNative, idxVarRef, kIemNativeGstRegRef_Gpr, iGReg);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarRef, sizeof(void *));

    /* If we've delayed writing back the register value, flush it now. */
    off = iemNativeRegFlushPendingSpecificWrite(pReNative, off, kIemNativeGstRegRef_Gpr, iGReg);

    /* If it's not a const reference we need to flush the shadow copy of the register now. */
    if (!fConst)
        iemNativeRegFlushGuestShadows(pReNative, RT_BIT_64(IEMNATIVEGSTREG_GPR(iGReg)));

    return off;
}


#undef IEM_MC_REF_EFLAGS /* should not be used. */
#define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) \
    IEMNATIVE_EFLAGS_OPTIMIZATION_STATS(a_fEflInput, a_fEflOutput); \
    off = iemNativeEmitRefEFlags(pReNative, off, a_pEFlags, a_fEflInput, a_fEflOutput)

/** Handles IEM_MC_REF_EFLAGS. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitRefEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRef, uint32_t fEflInput, uint32_t fEflOutput)
{
    iemNativeVarSetKindToGstRegRef(pReNative, idxVarRef, kIemNativeGstRegRef_EFlags, 0);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarRef, sizeof(void *));

#ifdef IEMNATIVE_STRICT_EFLAGS_SKIPPING
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fEflInput);

    /* Updating the skipping according to the outputs is a little early, but
       we don't have any other hooks for references atm. */
    if ((fEflOutput & X86_EFL_STATUS_BITS) == X86_EFL_STATUS_BITS)
        off = iemNativeEmitStoreImmToVCpuU32(pReNative, off, 0, RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags));
    else if (fEflOutput & X86_EFL_STATUS_BITS)
        off = iemNativeEmitAndImmIntoVCpuU32(pReNative, off, ~(fEflOutput & X86_EFL_STATUS_BITS),
                                             RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags));
#else
    RT_NOREF(fEflInput, fEflOutput);
#endif

    /* If we've delayed writing back the register value, flush it now. */
    off = iemNativeRegFlushPendingSpecificWrite(pReNative, off, kIemNativeGstRegRef_EFlags, 0);

    /* If there is a shadow copy of guest EFLAGS, flush it now. */
    iemNativeRegFlushGuestShadows(pReNative, RT_BIT_64(kIemNativeGstReg_EFlags));

    return off;
}


/** @todo Emit code for IEM_MC_ASSERT_EFLAGS in strict builds?  Once we emit
 * different code from the threaded recompiler, maybe it would be helpful.  For
 * now we assume the threaded recompiler catches any incorrect EFLAGS
 * declarations. */
#define IEM_MC_ASSERT_EFLAGS(a_fEflInput, a_fEflOutput) ((void)0)


#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
    off = iemNativeEmitRefXregXxx(pReNative, off, a_pu128Dst, a_iXReg, false /*fConst*/)

#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
    off = iemNativeEmitRefXregXxx(pReNative, off, a_pu128Dst, a_iXReg, true /*fConst*/)

#define IEM_MC_REF_XREG_XMM_CONST(a_pXmmDst, a_iXReg) \
    off = iemNativeEmitRefXregXxx(pReNative, off, a_pXmmDst, a_iXReg, true /*fConst*/)

/** Handles IEM_MC_REF_XREG_xxx[_CONST]. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitRefXregXxx(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRef, uint8_t iXReg, bool fConst)
{
    Assert(iXReg < 16);
    iemNativeVarSetKindToGstRegRef(pReNative, idxVarRef, kIemNativeGstRegRef_XReg, iXReg);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarRef, sizeof(void *));

    /* If we've delayed writing back the register value, flush it now. */
    off = iemNativeRegFlushPendingSpecificWrite(pReNative, off, kIemNativeGstRegRef_XReg, iXReg);

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    /* If it's not a const reference we need to flush the shadow copy of the register now. */
    if (!fConst)
        iemNativeSimdRegFlushGuestShadows(pReNative, RT_BIT_64(IEMNATIVEGSTSIMDREG_SIMD(iXReg)));
#else
    RT_NOREF(fConst);
#endif

    return off;
}


#define IEM_MC_REF_MXCSR(a_pfMxcsr) \
    off = iemNativeEmitRefMxcsr(pReNative, off, a_pfMxcsr)

/** Handles IEM_MC_REF_MXCSR. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitRefMxcsr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarRef)
{
    iemNativeVarSetKindToGstRegRef(pReNative, idxVarRef, kIemNativeGstRegRef_MxCsr, 0);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarRef, sizeof(void *));

    /* If we've delayed writing back the register value, flush it now. */
    off = iemNativeRegFlushPendingSpecificWrite(pReNative, off, kIemNativeGstRegRef_MxCsr, 0);

    /* If there is a shadow copy of guest MXCSR, flush it now. */
    iemNativeRegFlushGuestShadows(pReNative, RT_BIT_64(kIemNativeGstReg_MxCsr));

    return off;
}


/*********************************************************************************************************************************
*   Effective Address Calculation                                                                                               *
*********************************************************************************************************************************/
#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_16(a_GCPtrEff, a_bRm, a_u16Disp) \
    off = iemNativeEmitCalcRmEffAddrThreadedAddr16(pReNative, off, a_bRm, a_u16Disp, a_GCPtrEff)

/** Emit code for IEM_MC_CALC_RM_EFF_ADDR_THREADED_16.
 * @sa iemOpHlpCalcRmEffAddrThreadedAddr16  */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCalcRmEffAddrThreadedAddr16(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                         uint8_t bRm, uint16_t u16Disp, uint8_t idxVarRet)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarRet);

    /*
     * Handle the disp16 form with no registers first.
     *
     * Convert to an immediate value, as that'll delay the register allocation
     * and assignment till the memory access / call / whatever and we can use
     * a more appropriate register (or none at all).
     */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    {
        iemNativeVarSetKindToConst(pReNative, idxVarRet, u16Disp);
        return off;
    }

    /* Determine the displacement. */
    uint16_t u16EffAddr;
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0: u16EffAddr = 0;                        break;
        case 1: u16EffAddr = (int16_t)(int8_t)u16Disp; break;
        case 2: u16EffAddr = u16Disp;                  break;
        default: AssertFailedStmt(u16EffAddr = 0);
    }

    /* Determine the registers involved. */
    uint8_t idxGstRegBase;
    uint8_t idxGstRegIndex;
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0:
            idxGstRegBase  = X86_GREG_xBX;
            idxGstRegIndex = X86_GREG_xSI;
            break;
        case 1:
            idxGstRegBase  = X86_GREG_xBX;
            idxGstRegIndex = X86_GREG_xDI;
            break;
        case 2:
            idxGstRegBase  = X86_GREG_xBP;
            idxGstRegIndex = X86_GREG_xSI;
            break;
        case 3:
            idxGstRegBase  = X86_GREG_xBP;
            idxGstRegIndex = X86_GREG_xDI;
            break;
        case 4:
            idxGstRegBase  = X86_GREG_xSI;
            idxGstRegIndex = UINT8_MAX;
            break;
        case 5:
            idxGstRegBase  = X86_GREG_xDI;
            idxGstRegIndex = UINT8_MAX;
            break;
        case 6:
            idxGstRegBase  = X86_GREG_xBP;
            idxGstRegIndex = UINT8_MAX;
            break;
#ifdef _MSC_VER  /* lazy compiler, thinks idxGstRegBase and idxGstRegIndex may otherwise be used uninitialized. */
        default:
#endif
        case 7:
            idxGstRegBase  = X86_GREG_xBX;
            idxGstRegIndex = UINT8_MAX;
            break;
    }

    /*
     * Now emit code that calculates: idxRegRet = (uint16_t)(u16EffAddr + idxGstRegBase [+ idxGstRegIndex])
     */
    uint8_t const idxRegRet   = iemNativeVarRegisterAcquire(pReNative, idxVarRet, &off);
    uint8_t const idxRegBase  = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(idxGstRegBase),
                                                                kIemNativeGstRegUse_ReadOnly);
    uint8_t const idxRegIndex = idxGstRegIndex != UINT8_MAX
                              ? iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(idxGstRegIndex),
                                                                kIemNativeGstRegUse_ReadOnly)
                              : UINT8_MAX;
#ifdef RT_ARCH_AMD64
    if (idxRegIndex == UINT8_MAX)
    {
        if (u16EffAddr == 0)
        {
            /* movzx ret, base */
            off = iemNativeEmitLoadGprFromGpr16(pReNative, off, idxRegRet, idxRegBase);
        }
        else
        {
            /* lea ret32, [base64 + disp32] */
            Assert(idxRegBase != X86_GREG_xSP /*SIB*/);
            uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
            if (idxRegRet >= 8 || idxRegBase >= 8)
                pbCodeBuf[off++] = (idxRegRet >= 8 ? X86_OP_REX_R : 0) | (idxRegBase >= 8 ? X86_OP_REX_B : 0);
            pbCodeBuf[off++] = 0x8d;
            if (idxRegBase != X86_GREG_x12 /*SIB*/)
                pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, idxRegRet & 7, idxRegBase & 7);
            else
            {
                pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, idxRegRet & 7, 4 /*SIB*/);
                pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_x12 & 7, 4 /*no index*/, 0);
            }
            pbCodeBuf[off++] = RT_BYTE1(u16EffAddr);
            pbCodeBuf[off++] = RT_BYTE2(u16EffAddr);
            pbCodeBuf[off++] = 0;
            pbCodeBuf[off++] = 0;
            IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

            off = iemNativeEmitClear16UpGpr(pReNative, off, idxRegRet);
        }
    }
    else
    {
        /* lea ret32, [index64 + base64 (+ disp32)] */
        Assert(idxRegIndex != X86_GREG_xSP /*no-index*/);
        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
        if (idxRegRet >= 8 || idxRegBase >= 8 || idxRegIndex >= 8)
            pbCodeBuf[off++] = (idxRegRet >= 8   ? X86_OP_REX_R : 0)
                             | (idxRegBase >= 8  ? X86_OP_REX_B : 0)
                             | (idxRegIndex >= 8 ? X86_OP_REX_X : 0);
        pbCodeBuf[off++] = 0x8d;
        uint8_t const bMod = u16EffAddr == 0 && (idxRegBase & 7) != X86_GREG_xBP ? X86_MOD_MEM0 : X86_MOD_MEM4;
        pbCodeBuf[off++] = X86_MODRM_MAKE(bMod, idxRegRet & 7, 4 /*SIB*/);
        pbCodeBuf[off++] = X86_SIB_MAKE(idxRegBase & 7, idxRegIndex & 7, 0);
        if (bMod == X86_MOD_MEM4)
        {
            pbCodeBuf[off++] = RT_BYTE1(u16EffAddr);
            pbCodeBuf[off++] = RT_BYTE2(u16EffAddr);
            pbCodeBuf[off++] = 0;
            pbCodeBuf[off++] = 0;
        }
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
        off = iemNativeEmitClear16UpGpr(pReNative, off, idxRegRet);
    }

#elif defined(RT_ARCH_ARM64)
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
    if (u16EffAddr == 0)
    {
        if (idxRegIndex == UINT8_MAX)
            pu32CodeBuf[off++] = Armv8A64MkInstrUxth(idxRegRet, idxRegBase);
        else
        {
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, idxRegRet, idxRegBase, idxRegIndex, false /*f64Bit*/);
            pu32CodeBuf[off++] = Armv8A64MkInstrUxth(idxRegRet, idxRegRet);
        }
    }
    else
    {
        if ((int16_t)u16EffAddr < 4096 && (int16_t)u16EffAddr >= 0)
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, idxRegRet, idxRegBase, u16EffAddr, false /*f64Bit*/);
        else if ((int16_t)u16EffAddr > -4096 && (int16_t)u16EffAddr < 0)
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, idxRegRet, idxRegBase,
                                                             (uint16_t)-(int16_t)u16EffAddr, false /*f64Bit*/);
        else
        {
            pu32CodeBuf[off++] = Armv8A64MkInstrMovZ(idxRegRet, u16EffAddr);
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, idxRegRet, idxRegRet, idxRegBase, false /*f64Bit*/);
        }
        if (idxRegIndex != UINT8_MAX)
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, idxRegRet, idxRegRet, idxRegIndex, false /*f64Bit*/);
        pu32CodeBuf[off++] = Armv8A64MkInstrUxth(idxRegRet, idxRegRet);
    }

#else
# error "port me"
#endif

    if (idxRegIndex != UINT8_MAX)
        iemNativeRegFreeTmp(pReNative, idxRegIndex);
    iemNativeRegFreeTmp(pReNative, idxRegBase);
    iemNativeVarRegisterRelease(pReNative, idxVarRet);
    return off;
}
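
/*
 * Recap of the 16-bit addressing forms handled above (ModRM.rm):
 *      0: [BX+SI]   1: [BX+DI]   2: [BP+SI]   3: [BP+DI]
 *      4: [SI]      5: [DI]      6: [BP] (or disp16 when mod=0)   7: [BX]
 * each optionally with a disp8 (mod=1) or disp16 (mod=2) displacement, and
 * the result always truncated to 16 bits.
 */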


#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_32(a_GCPtrEff, a_bRm, a_uSibAndRspOffset, a_u32Disp) \
    off = iemNativeEmitCalcRmEffAddrThreadedAddr32(pReNative, off, a_bRm, a_uSibAndRspOffset, a_u32Disp, a_GCPtrEff)

/** Emit code for IEM_MC_CALC_RM_EFF_ADDR_THREADED_32.
 * @see iemOpHlpCalcRmEffAddrThreadedAddr32  */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCalcRmEffAddrThreadedAddr32(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                         uint8_t bRm, uint32_t uSibAndRspOffset, uint32_t u32Disp, uint8_t idxVarRet)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarRet);

    /*
     * Handle the disp32 form with no registers first.
     *
     * Convert to an immediate value, as that'll delay the register allocation
     * and assignment till the memory access / call / whatever and we can use
     * a more appropriate register (or none at all).
     */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        iemNativeVarSetKindToConst(pReNative, idxVarRet, u32Disp);
        return off;
    }

    /* Calculate the fixed displacement (more down in SIB.B=4 and SIB.B=5 on this). */
    uint32_t u32EffAddr = 0;
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0: break;
        case 1: u32EffAddr = (int8_t)u32Disp; break;
        case 2: u32EffAddr = u32Disp;         break;
        default: AssertFailed();
    }

    /* Get the register (or SIB) value. */
    uint8_t idxGstRegBase  = UINT8_MAX;
    uint8_t idxGstRegIndex = UINT8_MAX;
    uint8_t cShiftIndex    = 0;
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: idxGstRegBase = X86_GREG_xAX; break;
        case 1: idxGstRegBase = X86_GREG_xCX; break;
        case 2: idxGstRegBase = X86_GREG_xDX; break;
        case 3: idxGstRegBase = X86_GREG_xBX; break;
        case 4: /* SIB */
        {
            /* index w/ scaling */
            cShiftIndex = (uSibAndRspOffset >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
            switch ((uSibAndRspOffset >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
            {
                case 0: idxGstRegIndex = X86_GREG_xAX; break;
                case 1: idxGstRegIndex = X86_GREG_xCX; break;
                case 2: idxGstRegIndex = X86_GREG_xDX; break;
                case 3: idxGstRegIndex = X86_GREG_xBX; break;
                case 4: cShiftIndex    = 0; /*no index*/ break;
                case 5: idxGstRegIndex = X86_GREG_xBP; break;
                case 6: idxGstRegIndex = X86_GREG_xSI; break;
                case 7: idxGstRegIndex = X86_GREG_xDI; break;
            }

            /* base */
            switch (uSibAndRspOffset & X86_SIB_BASE_MASK)
            {
                case 0: idxGstRegBase = X86_GREG_xAX; break;
                case 1: idxGstRegBase = X86_GREG_xCX; break;
                case 2: idxGstRegBase = X86_GREG_xDX; break;
                case 3: idxGstRegBase = X86_GREG_xBX; break;
                case 4:
                    idxGstRegBase = X86_GREG_xSP;
                    u32EffAddr   += uSibAndRspOffset >> 8;
                    break;
                case 5:
                    if ((bRm & X86_MODRM_MOD_MASK) != 0)
                        idxGstRegBase = X86_GREG_xBP;
                    else
                    {
                        Assert(u32EffAddr == 0);
                        u32EffAddr = u32Disp;
                    }
                    break;
                case 6: idxGstRegBase = X86_GREG_xSI; break;
                case 7: idxGstRegBase = X86_GREG_xDI; break;
            }
            break;
        }
        case 5: idxGstRegBase = X86_GREG_xBP; break;
        case 6: idxGstRegBase = X86_GREG_xSI; break;
        case 7: idxGstRegBase = X86_GREG_xDI; break;
    }

    /*
     * If no registers are involved (SIB.B=5, SIB.X=4) repeat what we did at
     * the start of the function.
     */
    if (idxGstRegBase == UINT8_MAX && idxGstRegIndex == UINT8_MAX)
    {
        iemNativeVarSetKindToConst(pReNative, idxVarRet, u32EffAddr);
        return off;
    }

    /*
     * Now emit code that calculates: idxRegRet = (uint32_t)(u32EffAddr [+ idxGstRegBase] [+ (idxGstRegIndex << cShiftIndex)])
     */
    uint8_t const idxRegRet   = iemNativeVarRegisterAcquire(pReNative, idxVarRet, &off);
    uint8_t       idxRegBase  = idxGstRegBase == UINT8_MAX ? UINT8_MAX
                              : iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(idxGstRegBase),
                                                                kIemNativeGstRegUse_ReadOnly);
    uint8_t       idxRegIndex = idxGstRegIndex == UINT8_MAX ? UINT8_MAX
                              : iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(idxGstRegIndex),
                                                                kIemNativeGstRegUse_ReadOnly);

    /* If base is not given and there is no shifting, swap the registers to avoid code duplication. */
    if (idxRegBase == UINT8_MAX && cShiftIndex == 0)
    {
        idxRegBase  = idxRegIndex;
        idxRegIndex = UINT8_MAX;
    }

#ifdef RT_ARCH_AMD64
    if (idxRegIndex == UINT8_MAX)
    {
        if (u32EffAddr == 0)
        {
            /* mov ret, base */
            off = iemNativeEmitLoadGprFromGpr32(pReNative, off, idxRegRet, idxRegBase);
        }
        else
        {
            /* lea ret32, [base64 + disp32] */
            Assert(idxRegBase != X86_GREG_xSP /*SIB*/);
            uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
            if (idxRegRet >= 8 || idxRegBase >= 8)
                pbCodeBuf[off++] = (idxRegRet >= 8 ? X86_OP_REX_R : 0) | (idxRegBase >= 8 ? X86_OP_REX_B : 0);
            pbCodeBuf[off++] = 0x8d;
            uint8_t const bMod = (int8_t)u32EffAddr == (int32_t)u32EffAddr ? X86_MOD_MEM1 : X86_MOD_MEM4;
            if (idxRegBase != X86_GREG_x12 /*SIB*/)
                pbCodeBuf[off++] = X86_MODRM_MAKE(bMod, idxRegRet & 7, idxRegBase & 7);
            else
            {
                pbCodeBuf[off++] = X86_MODRM_MAKE(bMod, idxRegRet & 7, 4 /*SIB*/);
                pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_x12 & 7, 4 /*no index*/, 0);
            }
            pbCodeBuf[off++] = RT_BYTE1(u32EffAddr);
            if (bMod == X86_MOD_MEM4)
            {
                pbCodeBuf[off++] = RT_BYTE2(u32EffAddr);
                pbCodeBuf[off++] = RT_BYTE3(u32EffAddr);
                pbCodeBuf[off++] = RT_BYTE4(u32EffAddr);
            }
            IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
        }
    }
    else
    {
        Assert(idxRegIndex != X86_GREG_xSP /*no-index*/);
        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
        if (idxRegBase == UINT8_MAX)
        {
            /* lea ret32, [(index64 << cShiftIndex) + disp32] */
            if (idxRegRet >= 8 || idxRegIndex >= 8)
                pbCodeBuf[off++] = (idxRegRet >= 8   ? X86_OP_REX_R : 0)
                                 | (idxRegIndex >= 8 ? X86_OP_REX_X : 0);
            pbCodeBuf[off++] = 0x8d;
            pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM0, idxRegRet & 7, 4 /*SIB*/);
            pbCodeBuf[off++] = X86_SIB_MAKE(5 /*nobase/bp*/, idxRegIndex & 7, cShiftIndex);
            pbCodeBuf[off++] = RT_BYTE1(u32EffAddr);
            pbCodeBuf[off++] = RT_BYTE2(u32EffAddr);
            pbCodeBuf[off++] = RT_BYTE3(u32EffAddr);
            pbCodeBuf[off++] = RT_BYTE4(u32EffAddr);
        }
        else
        {
            /* lea ret32, [(index64 << cShiftIndex) + base64 (+ disp32)] */
            if (idxRegRet >= 8 || idxRegBase >= 8 || idxRegIndex >= 8)
                pbCodeBuf[off++] = (idxRegRet >= 8   ? X86_OP_REX_R : 0)
                                 | (idxRegBase >= 8  ? X86_OP_REX_B : 0)
                                 | (idxRegIndex >= 8 ? X86_OP_REX_X : 0);
            pbCodeBuf[off++] = 0x8d;
            uint8_t const bMod = u32EffAddr == 0 && (idxRegBase & 7) != X86_GREG_xBP ? X86_MOD_MEM0
                               : (int8_t)u32EffAddr == (int32_t)u32EffAddr           ? X86_MOD_MEM1 : X86_MOD_MEM4;
            pbCodeBuf[off++] = X86_MODRM_MAKE(bMod, idxRegRet & 7, 4 /*SIB*/);
            pbCodeBuf[off++] = X86_SIB_MAKE(idxRegBase & 7, idxRegIndex & 7, cShiftIndex);
            if (bMod != X86_MOD_MEM0)
            {
                pbCodeBuf[off++] = RT_BYTE1(u32EffAddr);
                if (bMod == X86_MOD_MEM4)
                {
                    pbCodeBuf[off++] = RT_BYTE2(u32EffAddr);
                    pbCodeBuf[off++] = RT_BYTE3(u32EffAddr);
                    pbCodeBuf[off++] = RT_BYTE4(u32EffAddr);
                }
            }
        }
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    }

#elif defined(RT_ARCH_ARM64)
    if (u32EffAddr == 0)
    {
        if (idxRegIndex == UINT8_MAX)
            off = iemNativeEmitLoadGprFromGpr32(pReNative, off, idxRegRet, idxRegBase);
        else if (idxRegBase == UINT8_MAX)
        {
            if (cShiftIndex == 0)
                off = iemNativeEmitLoadGprFromGpr32(pReNative, off, idxRegRet, idxRegIndex);
            else
            {
                uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
                pu32CodeBuf[off++] = Armv8A64MkInstrLslImm(idxRegRet, idxRegIndex, cShiftIndex, false /*f64Bit*/);
            }
        }
        else
        {
            uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, idxRegRet, idxRegBase, idxRegIndex,
                                                          false /*f64Bit*/, false /*fSetFlags*/, cShiftIndex);
        }
    }
    else
    {
        if ((int32_t)u32EffAddr < 4096 && (int32_t)u32EffAddr >= 0 && idxRegBase != UINT8_MAX)
        {
            uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, idxRegRet, idxRegBase, u32EffAddr, false /*f64Bit*/);
        }
        else if ((int32_t)u32EffAddr > -4096 && (int32_t)u32EffAddr < 0 && idxRegBase != UINT8_MAX)
        {
            uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, idxRegRet, idxRegBase,
                                                             (uint32_t)-(int32_t)u32EffAddr, false /*f64Bit*/);
        }
        else
        {
            off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegRet, u32EffAddr);
            if (idxRegBase != UINT8_MAX)
            {
                uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
                pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, idxRegRet, idxRegRet, idxRegBase, false /*f64Bit*/);
            }
        }
        if (idxRegIndex != UINT8_MAX)
        {
            uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, idxRegRet, idxRegRet, idxRegIndex,
                                                          false /*f64Bit*/, false /*fSetFlags*/, cShiftIndex);
        }
    }

#else
# error "port me"
#endif

    if (idxRegIndex != UINT8_MAX)
        iemNativeRegFreeTmp(pReNative, idxRegIndex);
    if (idxRegBase != UINT8_MAX)
        iemNativeRegFreeTmp(pReNative, idxRegBase);
    iemNativeVarRegisterRelease(pReNative, idxVarRet);
    return off;
}
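
/*
 * Worked decode example for the 32-bit path above (hypothetical input):
 * bRm=0x4C gives mod=1, rm=4, so a SIB byte follows plus a disp8; with
 * uSibAndRspOffset=0x8B (scale=2, index=ECX, base=EBX) and u32Disp=0x10 the
 * effective address is EBX + ECX*4 + 16, and the emitter produces e.g.
 * "lea ret32, [rbx + rcx*4 + 16]" when those guest registers are shadowed
 * in the same-named host registers.
 */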


#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    off = iemNativeEmitCalcRmEffAddrThreadedAddr64(pReNative, off, a_bRmEx, a_uSibAndRspOffset, \
                                                   a_u32Disp, a_cbImm, a_GCPtrEff, true /*f64Bit*/)

#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    off = iemNativeEmitCalcRmEffAddrThreadedAddr64(pReNative, off, a_bRmEx, a_uSibAndRspOffset, \
                                                   a_u32Disp, a_cbImm, a_GCPtrEff, true /*f64Bit*/)

#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32(a_GCPtrEff, a_bRmEx, a_uSibAndRspOffset, a_u32Disp, a_cbImm) \
    off = iemNativeEmitCalcRmEffAddrThreadedAddr64(pReNative, off, a_bRmEx, a_uSibAndRspOffset, \
                                                   a_u32Disp, a_cbImm, a_GCPtrEff, false /*f64Bit*/)
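
/*
 * Example of the bRmEx packing used below (hypothetical instruction): for
 * "add rax, [r13+8]" the ModRM byte is 0x45 (mod=1, reg=0, rm=5) and REX.B
 * is set, so bRmEx = 0x45 | 0x08 = 0x4D, and (bRmEx & (X86_MODRM_RM_MASK | 0x8))
 * yields 13, i.e. r13 as the base register with a disp8 of 8.
 */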
4222 | /**
|
---|
4223 | * Emit code for IEM_MC_CALC_RM_EFF_ADDR_THREADED_64*.
|
---|
4224 | *
|
---|
4225 | * @returns New off.
|
---|
4226 | * @param pReNative .
|
---|
4227 | * @param off .
|
---|
4228 | * @param bRmEx The ModRM byte but with bit 3 set to REX.B and
|
---|
4229 | * bit 4 to REX.X. The two bits are part of the
|
---|
4230 | * REG sub-field, which isn't needed in this
|
---|
4231 | * function.
|
---|
4232 | * @param uSibAndRspOffset Two parts:
|
---|
4233 | * - The first 8 bits make up the SIB byte.
|
---|
4234 | * - The next 8 bits are the fixed RSP/ESP offset
|
---|
4235 | * in case of a pop [xSP].
|
---|
4236 | * @param u32Disp The displacement byte/word/dword, if any.
|
---|
4237 | * @param cbInstr The size of the fully decoded instruction. Used
|
---|
4238 | * for RIP relative addressing.
|
---|
4239 | * @param idxVarRet The result variable number.
|
---|
4240 | * @param f64Bit Whether to use a 64-bit or 32-bit address size
|
---|
4241 | * when calculating the address.
|
---|
4242 | *
|
---|
4243 | * @see iemOpHlpCalcRmEffAddrThreadedAddr64
|
---|
4244 | */
|
---|
DECL_INLINE_THROW(uint32_t)
iemNativeEmitCalcRmEffAddrThreadedAddr64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t bRmEx, uint32_t uSibAndRspOffset,
                                         uint32_t u32Disp, uint8_t cbInstr, uint8_t idxVarRet, bool f64Bit)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarRet);

    /*
     * Special case the rip + disp32 form first.
     */
    if ((bRmEx & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
        /* Need to take the current PC offset into account for the displacement;
           no need to flush here, as the PC is only read and there is no branching
           or calling of helpers involved. */
        u32Disp += pReNative->Core.offPc;
#endif

        uint8_t const idxRegRet = iemNativeVarRegisterAcquire(pReNative, idxVarRet, &off);
        uint8_t const idxRegPc  = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
                                                                  kIemNativeGstRegUse_ReadOnly);
#ifdef RT_ARCH_AMD64
        if (f64Bit)
        {
            int64_t const offFinalDisp = (int64_t)(int32_t)u32Disp + cbInstr;
            if ((int32_t)offFinalDisp == offFinalDisp)
                off = iemNativeEmitLoadGprFromGprWithAddendMaybeZero(pReNative, off, idxRegRet, idxRegPc, (int32_t)offFinalDisp);
            else
            {
                off = iemNativeEmitLoadGprFromGprWithAddend(pReNative, off, idxRegRet, idxRegPc, (int32_t)u32Disp);
                off = iemNativeEmitAddGprImm8(pReNative, off, idxRegRet, cbInstr);
            }
        }
        else
            off = iemNativeEmitLoadGprFromGpr32WithAddendMaybeZero(pReNative, off, idxRegRet, idxRegPc, (int32_t)u32Disp + cbInstr);

#elif defined(RT_ARCH_ARM64)
        if (f64Bit)
            off = iemNativeEmitLoadGprFromGprWithAddendMaybeZero(pReNative, off, idxRegRet, idxRegPc,
                                                                 (int64_t)(int32_t)u32Disp + cbInstr);
        else
            off = iemNativeEmitLoadGprFromGpr32WithAddendMaybeZero(pReNative, off, idxRegRet, idxRegPc,
                                                                   (int32_t)u32Disp + cbInstr);

#else
# error "Port me!"
#endif
        iemNativeRegFreeTmp(pReNative, idxRegPc);
        iemNativeVarRegisterRelease(pReNative, idxVarRet);
        return off;
    }

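    /* Illustration (assumption, not taken from the decoder): a RIP-relative
       access like mov rax, [rip+0x1000] decodes to mod=0/rm=5, which is the
       case handled above.  The guest PC register holds the address of the
       current instruction, while x86 defines the effective address relative
       to the next instruction, hence the '+ cbInstr' adjustments. */
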
    /* Calculate the fixed displacement (more on this below in the SIB.B=4 and SIB.B=5 handling). */
    int64_t i64EffAddr = 0;
    switch ((bRmEx >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0: break;
        case 1: i64EffAddr = (int8_t)u32Disp; break;
        case 2: i64EffAddr = (int32_t)u32Disp; break;
        default: AssertFailed();
    }

    /* Get the register (or SIB) value. */
    uint8_t idxGstRegBase  = UINT8_MAX;
    uint8_t idxGstRegIndex = UINT8_MAX;
    uint8_t cShiftIndex    = 0;
    if ((bRmEx & X86_MODRM_RM_MASK) != 4)
        idxGstRegBase = bRmEx & (X86_MODRM_RM_MASK | 0x8); /* bRmEx[bit 3] = REX.B */
    else /* SIB: */
    {
        /* index w/ scaling: */
        cShiftIndex    = (uSibAndRspOffset >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
        idxGstRegIndex = ((uSibAndRspOffset >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
                       | ((bRmEx & 0x10) >> 1); /* bRmEx[bit 4] = REX.X */
        if (idxGstRegIndex == 4)
        {
            /* no index */
            cShiftIndex    = 0;
            idxGstRegIndex = UINT8_MAX;
        }

        /* base */
        idxGstRegBase = (uSibAndRspOffset & X86_SIB_BASE_MASK) | (bRmEx & 0x8); /* bRmEx[bit 3] = REX.B */
        if (idxGstRegBase == 4)
        {
            /* pop [rsp] hack */
            i64EffAddr += uSibAndRspOffset >> 8; /* (this is why i64EffAddr must be 64-bit) */
        }
        else if (   (idxGstRegBase & X86_SIB_BASE_MASK) == 5
                 && (bRmEx & X86_MODRM_MOD_MASK) == 0)
        {
            /* mod=0 and base=5 -> disp32, no base reg. */
            Assert(i64EffAddr == 0);
            i64EffAddr    = (int32_t)u32Disp;
            idxGstRegBase = UINT8_MAX;
        }
    }

    /*
     * If no registers are involved (SIB.B=5, SIB.X=4) repeat what we did at
     * the start of the function.
     */
    if (idxGstRegBase == UINT8_MAX && idxGstRegIndex == UINT8_MAX)
    {
        if (f64Bit)
            iemNativeVarSetKindToConst(pReNative, idxVarRet, (uint64_t)i64EffAddr);
        else
            iemNativeVarSetKindToConst(pReNative, idxVarRet, (uint32_t)i64EffAddr);
        return off;
    }

    /*
     * Now emit code that calculates:
     *      idxRegRet = (uint64_t)(i64EffAddr [+ idxGstRegBase] [+ (idxGstRegIndex << cShiftIndex)])
     * or if !f64Bit:
     *      idxRegRet = (uint32_t)(i64EffAddr [+ idxGstRegBase] [+ (idxGstRegIndex << cShiftIndex)])
     */
    uint8_t const idxRegRet   = iemNativeVarRegisterAcquire(pReNative, idxVarRet, &off);
    uint8_t       idxRegBase  = idxGstRegBase == UINT8_MAX ? UINT8_MAX
                              : iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(idxGstRegBase),
                                                                kIemNativeGstRegUse_ReadOnly);
    uint8_t       idxRegIndex = idxGstRegIndex == UINT8_MAX ? UINT8_MAX
                              : iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(idxGstRegIndex),
                                                                kIemNativeGstRegUse_ReadOnly);

    /* If base is not given and there is no shifting, swap the registers to avoid code duplication. */
    if (idxRegBase == UINT8_MAX && cShiftIndex == 0)
    {
        idxRegBase  = idxRegIndex;
        idxRegIndex = UINT8_MAX;
    }

#ifdef RT_ARCH_AMD64
    uint8_t bFinalAdj;
    if (!f64Bit || (int32_t)i64EffAddr == i64EffAddr)
        bFinalAdj = 0; /* likely */
    else
    {
        /* pop [rsp] with a problematic disp32 value.  Split out the
           RSP offset and add it separately afterwards (bFinalAdj). */
        /** @todo testcase: pop [rsp] with problematic disp32 (mod4). */
        Assert(idxGstRegBase == X86_GREG_xSP);
        Assert(((bRmEx >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) == X86_MOD_MEM4);
        bFinalAdj   = (uint8_t)(uSibAndRspOffset >> 8);
        Assert(bFinalAdj != 0);
        i64EffAddr -= bFinalAdj;
        Assert((int32_t)i64EffAddr == i64EffAddr);
    }
    uint32_t const u32EffAddr = (uint32_t)i64EffAddr;
//pReNative->pInstrBuf[off++] = 0xcc;

    if (idxRegIndex == UINT8_MAX)
    {
        if (u32EffAddr == 0)
        {
            /* mov ret, base */
            if (f64Bit)
                off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegRet, idxRegBase);
            else
                off = iemNativeEmitLoadGprFromGpr32(pReNative, off, idxRegRet, idxRegBase);
        }
        else
        {
            /* lea ret, [base + disp32] */
            Assert(idxRegBase != X86_GREG_xSP /*SIB*/);
            uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
            if (f64Bit || idxRegRet >= 8 || idxRegBase >= 8)
                pbCodeBuf[off++] = (idxRegRet >= 8 ? X86_OP_REX_R : 0)
                                 | (idxRegBase >= 8 ? X86_OP_REX_B : 0)
                                 | (f64Bit ? X86_OP_REX_W : 0);
            pbCodeBuf[off++] = 0x8d;
            uint8_t const bMod = (int8_t)u32EffAddr == (int32_t)u32EffAddr ? X86_MOD_MEM1 : X86_MOD_MEM4;
            if (idxRegBase != X86_GREG_x12 /*SIB*/)
                pbCodeBuf[off++] = X86_MODRM_MAKE(bMod, idxRegRet & 7, idxRegBase & 7);
            else
            {
                pbCodeBuf[off++] = X86_MODRM_MAKE(bMod, idxRegRet & 7, 4 /*SIB*/);
                pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_x12 & 7, 4 /*no index*/, 0);
            }
            pbCodeBuf[off++] = RT_BYTE1(u32EffAddr);
            if (bMod == X86_MOD_MEM4)
            {
                pbCodeBuf[off++] = RT_BYTE2(u32EffAddr);
                pbCodeBuf[off++] = RT_BYTE3(u32EffAddr);
                pbCodeBuf[off++] = RT_BYTE4(u32EffAddr);
            }
            IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
        }
    }
    else
    {
        Assert(idxRegIndex != X86_GREG_xSP /*no-index*/);
        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
        if (idxRegBase == UINT8_MAX)
        {
            /* lea ret, [(index64 << cShiftIndex) + disp32] */
            if (f64Bit || idxRegRet >= 8 || idxRegIndex >= 8)
                pbCodeBuf[off++] = (idxRegRet >= 8 ? X86_OP_REX_R : 0)
                                 | (idxRegIndex >= 8 ? X86_OP_REX_X : 0)
                                 | (f64Bit ? X86_OP_REX_W : 0);
            pbCodeBuf[off++] = 0x8d;
            pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM0, idxRegRet & 7, 4 /*SIB*/);
            pbCodeBuf[off++] = X86_SIB_MAKE(5 /*nobase/bp*/, idxRegIndex & 7, cShiftIndex);
            pbCodeBuf[off++] = RT_BYTE1(u32EffAddr);
            pbCodeBuf[off++] = RT_BYTE2(u32EffAddr);
            pbCodeBuf[off++] = RT_BYTE3(u32EffAddr);
            pbCodeBuf[off++] = RT_BYTE4(u32EffAddr);
        }
        else
        {
            /* lea ret, [(index64 << cShiftIndex) + base64 (+ disp32)] */
            if (f64Bit || idxRegRet >= 8 || idxRegBase >= 8 || idxRegIndex >= 8)
                pbCodeBuf[off++] = (idxRegRet >= 8 ? X86_OP_REX_R : 0)
                                 | (idxRegBase >= 8 ? X86_OP_REX_B : 0)
                                 | (idxRegIndex >= 8 ? X86_OP_REX_X : 0)
                                 | (f64Bit ? X86_OP_REX_W : 0);
            pbCodeBuf[off++] = 0x8d;
            uint8_t const bMod = u32EffAddr == 0 && (idxRegBase & 7) != X86_GREG_xBP ? X86_MOD_MEM0
                               : (int8_t)u32EffAddr == (int32_t)u32EffAddr ? X86_MOD_MEM1 : X86_MOD_MEM4;
            pbCodeBuf[off++] = X86_MODRM_MAKE(bMod, idxRegRet & 7, 4 /*SIB*/);
            pbCodeBuf[off++] = X86_SIB_MAKE(idxRegBase & 7, idxRegIndex & 7, cShiftIndex);
            if (bMod != X86_MOD_MEM0)
            {
                pbCodeBuf[off++] = RT_BYTE1(u32EffAddr);
                if (bMod == X86_MOD_MEM4)
                {
                    pbCodeBuf[off++] = RT_BYTE2(u32EffAddr);
                    pbCodeBuf[off++] = RT_BYTE3(u32EffAddr);
                    pbCodeBuf[off++] = RT_BYTE4(u32EffAddr);
                }
            }
        }
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    }

    if (!bFinalAdj)
    { /* likely */ }
    else
    {
        Assert(f64Bit);
        off = iemNativeEmitAddGprImm8(pReNative, off, idxRegRet, bFinalAdj);
    }

#elif defined(RT_ARCH_ARM64)
    if (i64EffAddr == 0)
    {
        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        if (idxRegIndex == UINT8_MAX)
            pu32CodeBuf[off++] = Armv8A64MkInstrMov(idxRegRet, idxRegBase, f64Bit);
        else if (idxRegBase != UINT8_MAX)
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, idxRegRet, idxRegBase, idxRegIndex,
                                                          f64Bit, false /*fSetFlags*/, cShiftIndex);
        else
        {
            Assert(cShiftIndex != 0); /* See base = index swap above when shift is 0 and we have no base reg. */
            pu32CodeBuf[off++] = Armv8A64MkInstrLslImm(idxRegRet, idxRegIndex, cShiftIndex, f64Bit);
        }
    }
    else
    {
        if (f64Bit)
        { /* likely */ }
        else
            i64EffAddr = (int32_t)i64EffAddr;

        if (i64EffAddr < 4096 && i64EffAddr >= 0 && idxRegBase != UINT8_MAX)
        {
            uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, idxRegRet, idxRegBase, i64EffAddr, f64Bit);
        }
        else if (i64EffAddr > -4096 && i64EffAddr < 0 && idxRegBase != UINT8_MAX)
        {
            uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, idxRegRet, idxRegBase, (uint32_t)-i64EffAddr, f64Bit);
        }
        else
        {
            if (f64Bit)
                off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegRet, i64EffAddr);
            else
                off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegRet, (uint32_t)i64EffAddr);
            if (idxRegBase != UINT8_MAX)
            {
                uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
                pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, idxRegRet, idxRegRet, idxRegBase, f64Bit);
            }
        }
        if (idxRegIndex != UINT8_MAX)
        {
            uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, idxRegRet, idxRegRet, idxRegIndex,
                                                          f64Bit, false /*fSetFlags*/, cShiftIndex);
        }
    }

#else
# error "port me"
#endif

    if (idxRegIndex != UINT8_MAX)
        iemNativeRegFreeTmp(pReNative, idxRegIndex);
    if (idxRegBase != UINT8_MAX)
        iemNativeRegFreeTmp(pReNative, idxRegBase);
    iemNativeVarRegisterRelease(pReNative, idxVarRet);
    return off;
}
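
/* Worked example (illustrative only, values derived by hand): for a 64-bit
   mov rax, [rbx+rsi*8+0x10] the inputs would be roughly:
        bRmEx            = 0x44        - mod=1, reg=0 (rax), rm=4 (SIB), REX.B/REX.X clear
        uSibAndRspOffset = 0x000000f3  - SIB: scale=3 (*8), index=6 (rsi), base=3 (rbx)
        u32Disp          = 0x10
   iemNativeEmitCalcRmEffAddrThreadedAddr64 then finds idxGstRegBase=3 (rbx),
   idxGstRegIndex=6 (rsi), cShiftIndex=3 and i64EffAddr=16, and on AMD64 emits
   a single lea <ret>, [<base>+<index>*8+0x10] with a REX.W prefix. */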


/*********************************************************************************************************************************
*   Memory fetches and stores common                                                                                             *
*********************************************************************************************************************************/

typedef enum IEMNATIVEMITMEMOP
{
    kIemNativeEmitMemOp_Store = 0,
    kIemNativeEmitMemOp_Fetch,
    kIemNativeEmitMemOp_Fetch_Zx_U16,
    kIemNativeEmitMemOp_Fetch_Zx_U32,
    kIemNativeEmitMemOp_Fetch_Zx_U64,
    kIemNativeEmitMemOp_Fetch_Sx_U16,
    kIemNativeEmitMemOp_Fetch_Sx_U32,
    kIemNativeEmitMemOp_Fetch_Sx_U64
} IEMNATIVEMITMEMOP;
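
/* Mapping note: the enum values mirror the IEM_MC_FETCH_MEM_* name suffixes
   used below, e.g. IEM_MC_FETCH_MEM_U16_SX_U64 pairs cbMem = sizeof(uint16_t)
   with kIemNativeEmitMemOp_Fetch_Sx_U64 (16-bit load, sign-extended to 64
   bits), while a plain IEM_MC_STORE_MEM_U32 uses kIemNativeEmitMemOp_Store. */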

/** Emits code for IEM_MC_FETCH_MEM_U8/16/32/64 and IEM_MC_STORE_MEM_U8/16/32/64,
 *  and IEM_MC_FETCH_MEM_FLAT_U8/16/32/64 and IEM_MC_STORE_MEM_FLAT_U8/16/32/64
 *  (with iSegReg = UINT8_MAX). */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitMemFetchStoreDataCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarValue, uint8_t iSegReg,
                                     uint8_t idxVarGCPtrMem, uint8_t cbMem, uint8_t fAlignMask, IEMNATIVEMITMEMOP enmOp,
                                     uintptr_t pfnFunction, uint8_t idxInstr, uint8_t offDisp = 0)
{
    /*
     * Assert sanity.
     */
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarValue);
    PIEMNATIVEVAR const pVarValue = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarValue)];
    Assert(   enmOp != kIemNativeEmitMemOp_Store
           || pVarValue->enmKind == kIemNativeVarKind_Immediate
           || pVarValue->enmKind == kIemNativeVarKind_Stack);
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarGCPtrMem);
    PIEMNATIVEVAR const pVarGCPtrMem = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarGCPtrMem)];
    AssertStmt(   pVarGCPtrMem->enmKind == kIemNativeVarKind_Immediate
               || pVarGCPtrMem->enmKind == kIemNativeVarKind_Stack,
               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
    Assert(iSegReg < 6 || iSegReg == UINT8_MAX);
    Assert(cbMem == 1 || cbMem == 2 || cbMem == 4 || cbMem == 8);
    AssertCompile(IEMNATIVE_CALL_ARG_GREG_COUNT >= 4);
#ifdef VBOX_STRICT
    if (iSegReg == UINT8_MAX)
    {
        Assert(   (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT);
        switch (cbMem)
        {
            case 1:
                Assert(   pfnFunction
                       == (  enmOp == kIemNativeEmitMemOp_Store        ? (uintptr_t)iemNativeHlpMemFlatStoreDataU8
                           : enmOp == kIemNativeEmitMemOp_Fetch        ? (uintptr_t)iemNativeHlpMemFlatFetchDataU8
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U16 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU8
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U32 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU8
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U64 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU8
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U16 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU8_Sx_U16
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U32 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU8_Sx_U32
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU8_Sx_U64
                           : UINT64_C(0xc000b000a0009000) ));
                break;
            case 2:
                Assert(   pfnFunction
                       == (  enmOp == kIemNativeEmitMemOp_Store        ? (uintptr_t)iemNativeHlpMemFlatStoreDataU16
                           : enmOp == kIemNativeEmitMemOp_Fetch        ? (uintptr_t)iemNativeHlpMemFlatFetchDataU16
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U32 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU16
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U64 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU16
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U32 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU16_Sx_U32
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU16_Sx_U64
                           : UINT64_C(0xc000b000a0009000) ));
                break;
            case 4:
                Assert(   pfnFunction
                       == (  enmOp == kIemNativeEmitMemOp_Store        ? (uintptr_t)iemNativeHlpMemFlatStoreDataU32
                           : enmOp == kIemNativeEmitMemOp_Fetch        ? (uintptr_t)iemNativeHlpMemFlatFetchDataU32
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U64 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU32
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU32_Sx_U64
                           : UINT64_C(0xc000b000a0009000) ));
                break;
            case 8:
                Assert(   pfnFunction
                       == (  enmOp == kIemNativeEmitMemOp_Store ? (uintptr_t)iemNativeHlpMemFlatStoreDataU64
                           : enmOp == kIemNativeEmitMemOp_Fetch ? (uintptr_t)iemNativeHlpMemFlatFetchDataU64
                           : UINT64_C(0xc000b000a0009000) ));
                break;
        }
    }
    else
    {
        Assert(iSegReg < 6);
        switch (cbMem)
        {
            case 1:
                Assert(   pfnFunction
                       == (  enmOp == kIemNativeEmitMemOp_Store        ? (uintptr_t)iemNativeHlpMemStoreDataU8
                           : enmOp == kIemNativeEmitMemOp_Fetch        ? (uintptr_t)iemNativeHlpMemFetchDataU8
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U16 ? (uintptr_t)iemNativeHlpMemFetchDataU8
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U32 ? (uintptr_t)iemNativeHlpMemFetchDataU8
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U64 ? (uintptr_t)iemNativeHlpMemFetchDataU8
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U16 ? (uintptr_t)iemNativeHlpMemFetchDataU8_Sx_U16
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U32 ? (uintptr_t)iemNativeHlpMemFetchDataU8_Sx_U32
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFetchDataU8_Sx_U64
                           : UINT64_C(0xc000b000a0009000) ));
                break;
            case 2:
                Assert(   pfnFunction
                       == (  enmOp == kIemNativeEmitMemOp_Store        ? (uintptr_t)iemNativeHlpMemStoreDataU16
                           : enmOp == kIemNativeEmitMemOp_Fetch        ? (uintptr_t)iemNativeHlpMemFetchDataU16
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U32 ? (uintptr_t)iemNativeHlpMemFetchDataU16
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U64 ? (uintptr_t)iemNativeHlpMemFetchDataU16
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U32 ? (uintptr_t)iemNativeHlpMemFetchDataU16_Sx_U32
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFetchDataU16_Sx_U64
                           : UINT64_C(0xc000b000a0009000) ));
                break;
            case 4:
                Assert(   pfnFunction
                       == (  enmOp == kIemNativeEmitMemOp_Store        ? (uintptr_t)iemNativeHlpMemStoreDataU32
                           : enmOp == kIemNativeEmitMemOp_Fetch        ? (uintptr_t)iemNativeHlpMemFetchDataU32
                           : enmOp == kIemNativeEmitMemOp_Fetch_Zx_U64 ? (uintptr_t)iemNativeHlpMemFetchDataU32
                           : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFetchDataU32_Sx_U64
                           : UINT64_C(0xc000b000a0009000) ));
                break;
            case 8:
                Assert(   pfnFunction
                       == (  enmOp == kIemNativeEmitMemOp_Store ? (uintptr_t)iemNativeHlpMemStoreDataU64
                           : enmOp == kIemNativeEmitMemOp_Fetch ? (uintptr_t)iemNativeHlpMemFetchDataU64
                           : UINT64_C(0xc000b000a0009000) ));
                break;
        }
    }
#endif

#ifdef VBOX_STRICT
    /*
     * Check that the fExec flags we've got make sense.
     */
    off = iemNativeEmitExecFlagsCheck(pReNative, off, pReNative->fExec);
#endif

    /*
     * To keep things simple we have to commit any pending writes first as we
     * may end up making calls.
     */
    /** @todo we could postpone this till we make the call and reload the
     * registers after returning from the call. Not sure if that's sensible or
     * not, though. */
#ifndef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    off = iemNativeRegFlushPendingWrites(pReNative, off);
#else
    /* The program counter is treated differently for now. */
    off = iemNativeRegFlushPendingWrites(pReNative, off, RT_BIT_64(kIemNativeGstReg_Pc));
#endif

#ifdef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
    /*
     * Move/spill/flush stuff out of call-volatile registers.
     * This is the easy way out. We could contain this to the tlb-miss branch
     * by saving and restoring active stuff here.
     */
    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */);
#endif

    /*
     * Define labels and allocate the result register (trying for the return
     * register if we can).
     */
    uint16_t const uTlbSeqNo         = pReNative->uTlbSeqNo++;
    uint8_t  const idxRegValueFetch  = enmOp == kIemNativeEmitMemOp_Store ? UINT8_MAX
                                     : !(pReNative->Core.bmHstRegs & RT_BIT_32(IEMNATIVE_CALL_RET_GREG))
                                     ? iemNativeVarRegisterSetAndAcquire(pReNative, idxVarValue, IEMNATIVE_CALL_RET_GREG, &off)
                                     : iemNativeVarRegisterAcquire(pReNative, idxVarValue, &off);
    IEMNATIVEEMITTLBSTATE const TlbState(pReNative, &off, idxVarGCPtrMem, iSegReg, cbMem, offDisp);
    uint8_t  const idxRegValueStore  =    !TlbState.fSkip
                                       && enmOp == kIemNativeEmitMemOp_Store
                                       && pVarValue->enmKind != kIemNativeVarKind_Immediate
                                     ? iemNativeVarRegisterAcquire(pReNative, idxVarValue, &off)
                                     : UINT8_MAX;
    uint32_t const idxRegMemResult   = !TlbState.fSkip ? iemNativeRegAllocTmp(pReNative, &off) : UINT8_MAX;
    uint32_t const idxLabelTlbLookup = !TlbState.fSkip
                                     ? iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbLookup, UINT32_MAX, uTlbSeqNo)
                                     : UINT32_MAX;

    /*
     * Jump to the TLB lookup code.
     */
    if (!TlbState.fSkip)
        off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbLookup); /** @todo short jump */

    /*
     * TlbMiss:
     *
     * Call helper to do the fetching.
     * We flush all guest register shadow copies here.
     */
    uint32_t const idxLabelTlbMiss = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, off, uTlbSeqNo);

#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#else
    RT_NOREF(idxInstr);
#endif

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    if (pReNative->Core.offPc)
    {
        /*
         * Update the program counter but restore it at the end of the TlbMiss branch.
         * This should allow delaying more program counter updates for the TlbLookup and hit paths
         * which are hopefully much more frequent, reducing the amount of memory accesses.
         */
        /* Allocate a temporary PC register. */
        uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);

        /* Perform the addition and store the result. */
        off = iemNativeEmitAddGprImm(pReNative, off, idxPcReg, pReNative->Core.offPc);
        off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));

        /* Free and flush the PC register. */
        iemNativeRegFreeTmp(pReNative, idxPcReg);
        iemNativeRegFlushGuestShadowsByHostMask(pReNative, RT_BIT_32(idxPcReg));
    }
#endif

#ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
    /* Save variables in volatile registers. */
    uint32_t const fHstRegsNotToSave = TlbState.getRegsNotToSave()
                                     | (idxRegMemResult  != UINT8_MAX ? RT_BIT_32(idxRegMemResult)  : 0)
                                     | (idxRegValueFetch != UINT8_MAX ? RT_BIT_32(idxRegValueFetch) : 0);
    off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
#endif

    /* IEMNATIVE_CALL_ARG2/3_GREG = uValue (idxVarValue) - if store */
    uint32_t fVolGregMask = IEMNATIVE_CALL_VOLATILE_GREG_MASK;
    if (enmOp == kIemNativeEmitMemOp_Store)
    {
        uint8_t const idxRegArgValue = iSegReg == UINT8_MAX ? IEMNATIVE_CALL_ARG2_GREG : IEMNATIVE_CALL_ARG3_GREG;
        off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, idxRegArgValue, idxVarValue, 0 /*cbAppend*/,
#ifdef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
                                                        IEMNATIVE_CALL_VOLATILE_GREG_MASK);
#else
                                                        IEMNATIVE_CALL_VOLATILE_GREG_MASK, true /*fSpilledVarsInvolatileRegs*/);
        fVolGregMask &= ~RT_BIT_32(idxRegArgValue);
#endif
    }

    /* IEMNATIVE_CALL_ARG1_GREG = GCPtrMem */
    off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarGCPtrMem, offDisp /*cbAppend*/,
#ifdef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
                                                    fVolGregMask);
#else
                                                    fVolGregMask, true /*fSpilledVarsInvolatileRegs*/);
#endif

    if (iSegReg != UINT8_MAX)
    {
        /* IEMNATIVE_CALL_ARG2_GREG = iSegReg */
        AssertStmt(iSegReg < 6, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_EMIT_BAD_SEG_REG_NO));
        off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, iSegReg);
    }

    /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);

    /* Done setting up parameters, make the call. */
    off = iemNativeEmitCallImm(pReNative, off, pfnFunction);

    /*
     * Put the result in the right register if this is a fetch.
     */
    if (enmOp != kIemNativeEmitMemOp_Store)
    {
        Assert(idxRegValueFetch == pVarValue->idxReg);
        if (idxRegValueFetch != IEMNATIVE_CALL_RET_GREG)
            off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegValueFetch, IEMNATIVE_CALL_RET_GREG);
    }

#ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
    /* Restore variables and guest shadow registers to volatile registers. */
    off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
    off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, TlbState.getActiveRegsWithShadows());
#endif

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    if (pReNative->Core.offPc)
    {
        /*
         * Time to restore the program counter to its original value.
         */
        /* Allocate a temporary PC register. */
        uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);

        /* Restore the original value. */
        off = iemNativeEmitSubGprImm(pReNative, off, idxPcReg, pReNative->Core.offPc);
        off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));

        /* Free and flush the PC register. */
        iemNativeRegFreeTmp(pReNative, idxPcReg);
        iemNativeRegFlushGuestShadowsByHostMask(pReNative, RT_BIT_32(idxPcReg));
    }
#endif

#ifdef IEMNATIVE_WITH_TLB_LOOKUP
    if (!TlbState.fSkip)
    {
        /* end of TlbMiss - Jump to the done label. */
        uint32_t const idxLabelTlbDone = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbDone, UINT32_MAX, uTlbSeqNo);
        off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbDone);

        /*
         * TlbLookup:
         */
        off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMask,
                                           enmOp == kIemNativeEmitMemOp_Store ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ,
                                           idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult, offDisp);

        /*
         * Emit code to do the actual storing / fetching.
         */
        PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 64);
# ifdef VBOX_WITH_STATISTICS
        off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, TlbState.idxReg1, TlbState.idxReg2,
                                                  enmOp == kIemNativeEmitMemOp_Store
                                                  ? RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTlbHitsForStore)
                                                  : RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTlbHitsForFetch));
# endif
        switch (enmOp)
        {
            case kIemNativeEmitMemOp_Store:
                if (pVarValue->enmKind != kIemNativeVarKind_Immediate)
                {
                    switch (cbMem)
                    {
                        case 1:
                            off = iemNativeEmitStoreGpr8ByGprEx(pCodeBuf, off, idxRegValueStore, idxRegMemResult);
                            break;
                        case 2:
                            off = iemNativeEmitStoreGpr16ByGprEx(pCodeBuf, off, idxRegValueStore, idxRegMemResult);
                            break;
                        case 4:
                            off = iemNativeEmitStoreGpr32ByGprEx(pCodeBuf, off, idxRegValueStore, idxRegMemResult);
                            break;
                        case 8:
                            off = iemNativeEmitStoreGpr64ByGprEx(pCodeBuf, off, idxRegValueStore, idxRegMemResult);
                            break;
                        default:
                            AssertFailed();
                    }
                }
                else
                {
                    switch (cbMem)
                    {
                        case 1:
                            off = iemNativeEmitStoreImm8ByGprEx(pCodeBuf, off, (uint8_t)pVarValue->u.uValue,
                                                                idxRegMemResult, TlbState.idxReg1);
                            break;
                        case 2:
                            off = iemNativeEmitStoreImm16ByGprEx(pCodeBuf, off, (uint16_t)pVarValue->u.uValue,
                                                                 idxRegMemResult, TlbState.idxReg1);
                            break;
                        case 4:
                            off = iemNativeEmitStoreImm32ByGprEx(pCodeBuf, off, (uint32_t)pVarValue->u.uValue,
                                                                 idxRegMemResult, TlbState.idxReg1);
                            break;
                        case 8:
                            off = iemNativeEmitStoreImm64ByGprEx(pCodeBuf, off, pVarValue->u.uValue,
                                                                 idxRegMemResult, TlbState.idxReg1);
                            break;
                        default:
                            AssertFailed();
                    }
                }
                break;

            case kIemNativeEmitMemOp_Fetch:
            case kIemNativeEmitMemOp_Fetch_Zx_U16:
            case kIemNativeEmitMemOp_Fetch_Zx_U32:
            case kIemNativeEmitMemOp_Fetch_Zx_U64:
                switch (cbMem)
                {
                    case 1:
                        off = iemNativeEmitLoadGprByGprU8Ex(pCodeBuf, off, idxRegValueFetch, idxRegMemResult);
                        break;
                    case 2:
                        off = iemNativeEmitLoadGprByGprU16Ex(pCodeBuf, off, idxRegValueFetch, idxRegMemResult);
                        break;
                    case 4:
                        off = iemNativeEmitLoadGprByGprU32Ex(pCodeBuf, off, idxRegValueFetch, idxRegMemResult);
                        break;
                    case 8:
                        off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, idxRegValueFetch, idxRegMemResult);
                        break;
                    default:
                        AssertFailed();
                }
                break;

            case kIemNativeEmitMemOp_Fetch_Sx_U16:
                Assert(cbMem == 1);
                off = iemNativeEmitLoadGprByGprU16SignExtendedFromS8Ex(pCodeBuf, off, idxRegValueFetch, idxRegMemResult);
                break;

            case kIemNativeEmitMemOp_Fetch_Sx_U32:
                Assert(cbMem == 1 || cbMem == 2);
                if (cbMem == 1)
                    off = iemNativeEmitLoadGprByGprU32SignExtendedFromS8Ex(pCodeBuf, off, idxRegValueFetch, idxRegMemResult);
                else
                    off = iemNativeEmitLoadGprByGprU32SignExtendedFromS16Ex(pCodeBuf, off, idxRegValueFetch, idxRegMemResult);
                break;

            case kIemNativeEmitMemOp_Fetch_Sx_U64:
                switch (cbMem)
                {
                    case 1:
                        off = iemNativeEmitLoadGprByGprU64SignExtendedFromS8Ex(pCodeBuf, off, idxRegValueFetch, idxRegMemResult);
                        break;
                    case 2:
                        off = iemNativeEmitLoadGprByGprU64SignExtendedFromS16Ex(pCodeBuf, off, idxRegValueFetch, idxRegMemResult);
                        break;
                    case 4:
                        off = iemNativeEmitLoadGprByGprU64SignExtendedFromS32Ex(pCodeBuf, off, idxRegValueFetch, idxRegMemResult);
                        break;
                    default:
                        AssertFailed();
                }
                break;

            default:
                AssertFailed();
        }

        iemNativeRegFreeTmp(pReNative, idxRegMemResult);

        /*
         * TlbDone:
         */
        iemNativeLabelDefine(pReNative, idxLabelTlbDone, off);

        TlbState.freeRegsAndReleaseVars(pReNative, idxVarGCPtrMem);

# ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
        /* Temp Hack: Flush all guest shadows in volatile registers in case of TLB miss. */
        iemNativeRegFlushGuestShadowsByHostMask(pReNative, IEMNATIVE_CALL_VOLATILE_GREG_MASK);
# endif
    }
#else
    RT_NOREF(fAlignMask, idxLabelTlbMiss);
#endif

    if (idxRegValueFetch != UINT8_MAX || idxRegValueStore != UINT8_MAX)
        iemNativeVarRegisterRelease(pReNative, idxVarValue);
    return off;
}
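
/* Rough sketch (not authoritative) of the native code layout produced above
   when the inline TLB lookup is active:

        jmp     TlbLookup           ; emitted up front
    TlbMiss:                        ; falls directly after the jump
        <save volatile regs, load pVCpu/GCPtrMem/iSegReg/value arguments>
        call    pfnFunction         ; helper performs the actual access
        <copy result into idxRegValueFetch, restore regs>
        jmp     TlbDone
    TlbLookup:
        <inline TLB probe, branching to TlbMiss on miss or misalignment>
        <inline load/store through the host address in idxRegMemResult>
    TlbDone:

   The shared uTlbSeqNo ties the three labels of one access together so that
   several accesses within the same instruction stay apart. */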



/*********************************************************************************************************************************
*   Memory fetches (IEM_MEM_FETCH_XXX).                                                                                          *
*********************************************************************************************************************************/

/* 8-bit segmented: */
#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u8Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU8, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Zx_U16, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU8, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Zx_U32, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU8, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Zx_U64, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU8, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Sx_U16, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU8_Sx_U16, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Sx_U32, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU8_Sx_U32, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Sx_U64, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU8_Sx_U64, pCallEntry->idxInstr)

/* 16-bit segmented: */
#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU16, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU16, pCallEntry->idxInstr, a_offDisp)

#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch_Zx_U32, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU16, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch_Zx_U64, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU16, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch_Sx_U32, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU16_Sx_U32, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch_Sx_U64, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU16_Sx_U64, pCallEntry->idxInstr)


/* 32-bit segmented: */
#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint32_t), sizeof(uint32_t) - 1, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU32, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint32_t), sizeof(uint32_t) - 1, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU32, pCallEntry->idxInstr, a_offDisp)

#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint32_t), sizeof(uint32_t) - 1, kIemNativeEmitMemOp_Fetch_Zx_U64, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU32, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint32_t), sizeof(uint32_t) - 1, kIemNativeEmitMemOp_Fetch_Sx_U64, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU32_Sx_U64, pCallEntry->idxInstr)


/* 64-bit segmented: */
#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint64_t), sizeof(uint64_t) - 1, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFetchDataU64, pCallEntry->idxInstr)



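/* The FLAT variants below pass iSegReg = UINT8_MAX, i.e. no segment base or
   limit handling is required; the common worker asserts that this is only
   used in 64-bit or flat 32-bit modes and expects the iemNativeHlpMemFlat*
   helpers instead of the segmented ones. */
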
/* 8-bit flat: */
#define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u8Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU8, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Zx_U16, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU8, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Zx_U32, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU8, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Zx_U64, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU8, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Sx_U16, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU8_Sx_U16, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Sx_U32, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU8_Sx_U32, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Fetch_Sx_U64, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU8_Sx_U64, pCallEntry->idxInstr)


/* 16-bit flat: */
#define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU16, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU16, pCallEntry->idxInstr, a_offDisp)

#define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch_Zx_U32, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU16, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch_Zx_U64, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU16, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch_Sx_U32, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU16_Sx_U32, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Fetch_Sx_U64, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU16_Sx_U64, pCallEntry->idxInstr)

/* 32-bit flat: */
#define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint32_t), sizeof(uint32_t) - 1, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU32, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint32_t), sizeof(uint32_t) - 1, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU32, pCallEntry->idxInstr, a_offDisp)

#define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint32_t), sizeof(uint32_t) - 1, kIemNativeEmitMemOp_Fetch_Zx_U64, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU32, pCallEntry->idxInstr)

#define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint32_t), sizeof(uint32_t) - 1, kIemNativeEmitMemOp_Fetch_Sx_U64, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU32_Sx_U64, pCallEntry->idxInstr)

/* 64-bit flat: */
#define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint64_t), sizeof(uint64_t) - 1, kIemNativeEmitMemOp_Fetch, \
                                               (uintptr_t)iemNativeHlpMemFlatFetchDataU64, pCallEntry->idxInstr)


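/* Note (assumption from the patterns above and below): fAlignMask encodes the
   natural alignment requirement, 0 for bytes and sizeof(type) - 1 otherwise,
   i.e. the address bits that the inline TLB path checks before using the
   direct mapping. */
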
/*********************************************************************************************************************************
*   Memory stores (IEM_MEM_STORE_XXX).                                                                                           *
*********************************************************************************************************************************/

#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u8Value, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Store, \
                                               (uintptr_t)iemNativeHlpMemStoreDataU8, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Value, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Store, \
                                               (uintptr_t)iemNativeHlpMemStoreDataU16, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Value, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint32_t), sizeof(uint32_t) - 1, kIemNativeEmitMemOp_Store, \
                                               (uintptr_t)iemNativeHlpMemStoreDataU32, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Value, a_iSeg, a_GCPtrMem, \
                                               sizeof(uint64_t), sizeof(uint64_t) - 1, kIemNativeEmitMemOp_Store, \
                                               (uintptr_t)iemNativeHlpMemStoreDataU64, pCallEntry->idxInstr)


#define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u8Value, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint8_t), 0 /*fAlignMask*/, kIemNativeEmitMemOp_Store, \
                                               (uintptr_t)iemNativeHlpMemFlatStoreDataU8, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Value, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint16_t), sizeof(uint16_t) - 1, kIemNativeEmitMemOp_Store, \
                                               (uintptr_t)iemNativeHlpMemFlatStoreDataU16, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Value, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint32_t), sizeof(uint32_t) - 1, kIemNativeEmitMemOp_Store, \
                                               (uintptr_t)iemNativeHlpMemFlatStoreDataU32, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Value, UINT8_MAX, a_GCPtrMem, \
                                               sizeof(uint64_t), sizeof(uint64_t) - 1, kIemNativeEmitMemOp_Store, \
                                               (uintptr_t)iemNativeHlpMemFlatStoreDataU64, pCallEntry->idxInstr)


#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8ConstValue) \
    off = iemNativeEmitMemStoreConstDataCommon(pReNative, off, a_u8ConstValue, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \
                                               (uintptr_t)iemNativeHlpMemStoreDataU8, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16ConstValue) \
    off = iemNativeEmitMemStoreConstDataCommon(pReNative, off, a_u16ConstValue, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \
                                               (uintptr_t)iemNativeHlpMemStoreDataU16, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32ConstValue) \
    off = iemNativeEmitMemStoreConstDataCommon(pReNative, off, a_u32ConstValue, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \
                                               (uintptr_t)iemNativeHlpMemStoreDataU32, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64ConstValue) \
    off = iemNativeEmitMemStoreConstDataCommon(pReNative, off, a_u64ConstValue, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \
                                               (uintptr_t)iemNativeHlpMemStoreDataU64, pCallEntry->idxInstr)


#define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8ConstValue) \
    off = iemNativeEmitMemStoreConstDataCommon(pReNative, off, a_u8ConstValue, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \
                                               (uintptr_t)iemNativeHlpMemFlatStoreDataU8, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16ConstValue) \
    off = iemNativeEmitMemStoreConstDataCommon(pReNative, off, a_u16ConstValue, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \
                                               (uintptr_t)iemNativeHlpMemFlatStoreDataU16, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32ConstValue) \
    off = iemNativeEmitMemStoreConstDataCommon(pReNative, off, a_u32ConstValue, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \
                                               (uintptr_t)iemNativeHlpMemFlatStoreDataU32, pCallEntry->idxInstr)

#define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64ConstValue) \
    off = iemNativeEmitMemStoreConstDataCommon(pReNative, off, a_u64ConstValue, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \
                                               (uintptr_t)iemNativeHlpMemFlatStoreDataU64, pCallEntry->idxInstr)

/** Emits code for IEM_MC_STORE_MEM_U8/16/32/64_CONST and
 *  IEM_MC_STORE_MEM_FLAT_U8/16/32/64_CONST (with iSegReg = UINT8_MAX). */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitMemStoreConstDataCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t uValueConst, uint8_t iSegReg,
                                     uint8_t idxVarGCPtrMem, uint8_t cbMem, uintptr_t pfnFunction, uint8_t idxInstr)
{
    /*
     * Create a temporary const variable and call iemNativeEmitMemFetchStoreDataCommon
     * to do the grunt work.
     */
    uint8_t const idxVarConstValue = iemNativeVarAllocConst(pReNative, cbMem, uValueConst);
    off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, idxVarConstValue, iSegReg, idxVarGCPtrMem,
                                               cbMem, cbMem - 1, kIemNativeEmitMemOp_Store,
                                               pfnFunction, idxInstr);
    iemNativeVarFreeLocal(pReNative, idxVarConstValue);
    return off;
}
|
---|
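
/* Illustrative only (operand names are placeholders): a statement such as
   IEM_MC_STORE_MEM_U16_CONST(X86_SREG_ES, GCPtrMem, 0xffff) lands here, which
   materializes the constant as a temporary variable so the generic
   variable-based store path can be reused unchanged. */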



/*********************************************************************************************************************************
*   Stack Accesses.                                                                                                              *
*********************************************************************************************************************************/
/* RT_MAKE_U32_FROM_U8(cBitsVar, cBitsFlat, fSReg, 0) */
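/* Example reading of the encoding: RT_MAKE_U32_FROM_U8(16, 32, 0, 0) is the
   IEM_MC_FLAT32_PUSH_U16 shape, i.e. a 16-bit value pushed using a flat 32-bit
   stack pointer and no segment-register semantics. */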
#define IEM_MC_PUSH_U16(a_u16Value) \
    off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 0, 0, 0), \
                                 (uintptr_t)iemNativeHlpStackStoreU16, pCallEntry->idxInstr)
#define IEM_MC_PUSH_U32(a_u32Value) \
    off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 0, 0, 0), \
                                 (uintptr_t)iemNativeHlpStackStoreU32, pCallEntry->idxInstr)
#define IEM_MC_PUSH_U32_SREG(a_uSegVal) \
    off = iemNativeEmitStackPush(pReNative, off, a_uSegVal, RT_MAKE_U32_FROM_U8(32, 0, 1, 0), \
                                 (uintptr_t)iemNativeHlpStackStoreU32SReg, pCallEntry->idxInstr)
#define IEM_MC_PUSH_U64(a_u64Value) \
    off = iemNativeEmitStackPush(pReNative, off, a_u64Value, RT_MAKE_U32_FROM_U8(64, 0, 0, 0), \
                                 (uintptr_t)iemNativeHlpStackStoreU64, pCallEntry->idxInstr)

#define IEM_MC_FLAT32_PUSH_U16(a_u16Value) \
    off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 32, 0, 0), \
                                 (uintptr_t)iemNativeHlpStackFlatStoreU16, pCallEntry->idxInstr)
#define IEM_MC_FLAT32_PUSH_U32(a_u32Value) \
    off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 32, 0, 0), \
                                 (uintptr_t)iemNativeHlpStackFlatStoreU32, pCallEntry->idxInstr)
#define IEM_MC_FLAT32_PUSH_U32_SREG(a_u32Value) \
    off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 32, 1, 0), \
                                 (uintptr_t)iemNativeHlpStackFlatStoreU32SReg, pCallEntry->idxInstr)

#define IEM_MC_FLAT64_PUSH_U16(a_u16Value) \
    off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 64, 0, 0), \
                                 (uintptr_t)iemNativeHlpStackFlatStoreU16, pCallEntry->idxInstr)
#define IEM_MC_FLAT64_PUSH_U64(a_u64Value) \
    off = iemNativeEmitStackPush(pReNative, off, a_u64Value, RT_MAKE_U32_FROM_U8(64, 64, 0, 0), \
                                 (uintptr_t)iemNativeHlpStackFlatStoreU64, pCallEntry->idxInstr)


DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitStackPushUse16Sp(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t idxRegRsp, uint8_t idxRegEffSp, uint8_t cbMem)
{
    /* Use16BitSp: */
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitSubGpr16ImmEx(pCodeBuf, off, idxRegRsp, cbMem); /* ASSUMES this does NOT modify bits [63:16]! */
    off = iemNativeEmitLoadGprFromGpr16Ex(pCodeBuf, off, idxRegEffSp, idxRegRsp);
#else
    /* sub regeff, regrsp, #cbMem */
    pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegEffSp, idxRegRsp, cbMem, false /*f64Bit*/);
    /* and regeff, regeff, #0xffff */
    Assert(Armv8A64ConvertImmRImmS2Mask32(15, 0) == 0xffff);
    pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegEffSp, idxRegEffSp, 15, 0, false /*f64Bit*/);
    /* bfi regrsp, regeff, #0, #16 - moves bits 15:0 from idxRegEffSp to idxRegRsp bits 15:0, keeping the rest as-is. */
    pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegRsp, idxRegEffSp, 0, 16, false /*f64Bit*/);
#endif
    return off;
}
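
/* Worked example: with SP=0x0000, a 2-byte push yields an effective address of
   0xFFFE (16-bit wraparound), and only bits 15:0 of RSP are updated; the upper
   RSP bits are deliberately preserved on both host architectures. */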


DECL_FORCE_INLINE(uint32_t)
iemNativeEmitStackPushUse32Sp(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t idxRegRsp, uint8_t idxRegEffSp, uint8_t cbMem)
{
    /* Use32BitSp: */
    off = iemNativeEmitSubGpr32ImmEx(pCodeBuf, off, idxRegRsp, cbMem);
    off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, idxRegEffSp, idxRegRsp);
    return off;
}
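
/* No bit-insertion is needed in the 32-bit case: the whole ESP is the stack
   pointer here, so plain 32-bit arithmetic on the register suffices. */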


/** IEM_MC[|_FLAT32|_FLAT64]_PUSH_U16/32/32_SREG/64 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStackPush(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarValue,
                       uint32_t cBitsVarAndFlat, uintptr_t pfnFunction, uint8_t idxInstr)
{
    /*
     * Assert sanity.
     */
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarValue);
    PIEMNATIVEVAR const pVarValue = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarValue)];
#ifdef VBOX_STRICT
    if (RT_BYTE2(cBitsVarAndFlat) != 0)
    {
        Assert(   (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT);
        Assert(   pfnFunction
               == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatStoreU16
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatStoreU32
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 1, 0) ? (uintptr_t)iemNativeHlpStackFlatStoreU32SReg
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatStoreU16
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatStoreU64
                   : UINT64_C(0xc000b000a0009000) ));
    }
    else
        Assert(   pfnFunction
               == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackStoreU16
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackStoreU32
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 0, 1, 0) ? (uintptr_t)iemNativeHlpStackStoreU32SReg
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackStoreU64
                   : UINT64_C(0xc000b000a0009000) ));
#endif

#ifdef VBOX_STRICT
    /*
     * Check that the fExec flags we've got make sense.
     */
    off = iemNativeEmitExecFlagsCheck(pReNative, off, pReNative->fExec);
#endif

    /*
     * To keep things simple we have to commit any pending writes first as we
     * may end up making calls.
     */
    /** @todo we could postpone this till we make the call and reload the
     * registers after returning from the call. Not sure if that's sensible or
     * not, though. */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

    /*
     * First we calculate the new RSP and the effective stack pointer value.
     * For 64-bit mode and flat 32-bit these two are the same.
     * (Code structure is very similar to that of PUSH)
     */
    uint8_t const cbMem       = RT_BYTE1(cBitsVarAndFlat) / 8;
    bool const    fIsSegReg   = RT_BYTE3(cBitsVarAndFlat) != 0;
    bool const    fIsIntelSeg = fIsSegReg && IEM_IS_GUEST_CPU_INTEL(pReNative->pVCpu);
    uint8_t const cbMemAccess = !fIsIntelSeg || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_16BIT
                              ? cbMem : sizeof(uint16_t);
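    /* Note: cbMemAccess may be smaller than cbMem because, as handled further
       down, a segment push on Intel CPUs outside 16-bit mode only writes the
       low 16 bits of the 32-bit stack slot. */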
    uint8_t const cBitsFlat   = RT_BYTE2(cBitsVarAndFlat); RT_NOREF(cBitsFlat);
    uint8_t const idxRegRsp   = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xSP),
                                                                kIemNativeGstRegUse_ForUpdate, true /*fNoVolatileRegs*/);
    uint8_t const idxRegEffSp = cBitsFlat != 0 ? idxRegRsp : iemNativeRegAllocTmp(pReNative, &off);
    uint32_t      offFixupJumpToUseOtherBitSp = UINT32_MAX;
    if (cBitsFlat != 0)
    {
        Assert(idxRegEffSp == idxRegRsp);
        Assert(cBitsFlat == 32 || cBitsFlat == 64);
        Assert(IEM_F_MODE_X86_IS_FLAT(pReNative->fExec));
        if (cBitsFlat == 64)
            off = iemNativeEmitSubGprImm(pReNative, off, idxRegRsp, cbMem);
        else
            off = iemNativeEmitSubGpr32Imm(pReNative, off, idxRegRsp, cbMem);
    }
    else /** @todo We can skip the test if we're targeting pre-386 CPUs. */
    {
        Assert(idxRegEffSp != idxRegRsp);
        uint8_t const idxRegSsAttr = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_ATTRIB(X86_SREG_SS),
                                                                     kIemNativeGstRegUse_ReadOnly);
#ifdef RT_ARCH_AMD64
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
#else
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
#endif
        off = iemNativeEmitTestAnyBitsInGpr32Ex(pCodeBuf, off, idxRegSsAttr, X86DESCATTR_D);
        iemNativeRegFreeTmp(pReNative, idxRegSsAttr);
        offFixupJumpToUseOtherBitSp = off;
        if ((pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT)
        {
            off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off /*8-bit suffices*/, kIemNativeInstrCond_e); /* jump if zero */
            off = iemNativeEmitStackPushUse32Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem);
        }
        else
        {
            off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off /*8-bit suffices*/, kIemNativeInstrCond_ne); /* jump if not zero */
            off = iemNativeEmitStackPushUse16Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem);
        }
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    }
    /* SpUpdateEnd: */
    uint32_t const offLabelSpUpdateEnd = off;
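
    /* Structure note: the likely SP-width path was emitted inline above; the
       conditional jump recorded in offFixupJumpToUseOtherBitSp is patched
       further down to reach the out-of-line path for the other width, which
       then jumps back to offLabelSpUpdateEnd. */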

    /*
     * Okay, now prepare for TLB lookup and jump to code (or the TlbMiss if
     * we're skipping lookup).
     */
    uint8_t const  iSegReg           = cBitsFlat != 0 ? UINT8_MAX : X86_SREG_SS;
    IEMNATIVEEMITTLBSTATE const TlbState(pReNative, idxRegEffSp, &off, iSegReg, cbMemAccess);
    uint16_t const uTlbSeqNo         = pReNative->uTlbSeqNo++;
    uint32_t const idxLabelTlbMiss   = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, UINT32_MAX, uTlbSeqNo);
    uint32_t const idxLabelTlbLookup = !TlbState.fSkip
                                     ? iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbLookup, UINT32_MAX, uTlbSeqNo)
                                     : UINT32_MAX;
    uint8_t const  idxRegValue       = !TlbState.fSkip
                                    && pVarValue->enmKind != kIemNativeVarKind_Immediate
                                     ? iemNativeVarRegisterAcquire(pReNative, idxVarValue, &off, true /*fInitialized*/,
                                                                   IEMNATIVE_CALL_ARG2_GREG /*idxRegPref*/)
                                     : UINT8_MAX;
    uint8_t const  idxRegMemResult   = !TlbState.fSkip ? iemNativeRegAllocTmp(pReNative, &off) : UINT8_MAX;


    if (!TlbState.fSkip)
        off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbLookup); /** @todo short jump */
    else
        off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbMiss); /** @todo short jump */

    /*
     * Use16BitSp:
     */
    if (cBitsFlat == 0)
    {
#ifdef RT_ARCH_AMD64
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
#else
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
#endif
        iemNativeFixupFixedJump(pReNative, offFixupJumpToUseOtherBitSp, off);
        if ((pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT)
            off = iemNativeEmitStackPushUse16Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem);
        else
            off = iemNativeEmitStackPushUse32Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem);
        off = iemNativeEmitJmpToFixedEx(pCodeBuf, off, offLabelSpUpdateEnd);
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    }

    /*
     * TlbMiss:
     *
     * Call helper to do the pushing.
     */
    iemNativeLabelDefine(pReNative, idxLabelTlbMiss, off);

#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#else
    RT_NOREF(idxInstr);
#endif

    /* Save variables in volatile registers. */
    uint32_t const fHstRegsNotToSave = TlbState.getRegsNotToSave()
                                     | (idxRegMemResult < RT_ELEMENTS(pReNative->Core.aHstRegs) ? RT_BIT_32(idxRegMemResult) : 0)
                                     | (idxRegEffSp != idxRegRsp ? RT_BIT_32(idxRegEffSp) : 0)
                                     | (idxRegValue < RT_ELEMENTS(pReNative->Core.aHstRegs) ? RT_BIT_32(idxRegValue) : 0);
    off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);

    if (   idxRegValue == IEMNATIVE_CALL_ARG1_GREG
        && idxRegEffSp == IEMNATIVE_CALL_ARG2_GREG)
    {
        /* Swap them using ARG0 as temp register: */
        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_CALL_ARG1_GREG);
        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_CALL_ARG2_GREG);
        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, IEMNATIVE_CALL_ARG0_GREG);
    }
    else if (idxRegEffSp != IEMNATIVE_CALL_ARG2_GREG)
    {
        /* IEMNATIVE_CALL_ARG2_GREG = idxVarValue (first!) */
        off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, idxVarValue,
                                                        0 /*offAddend*/, IEMNATIVE_CALL_VOLATILE_GREG_MASK);

        /* IEMNATIVE_CALL_ARG1_GREG = idxRegEffSp */
        if (idxRegEffSp != IEMNATIVE_CALL_ARG1_GREG)
            off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxRegEffSp);
    }
    else
    {
        /* IEMNATIVE_CALL_ARG1_GREG = idxRegEffSp (first!) */
        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxRegEffSp);

        /* IEMNATIVE_CALL_ARG2_GREG = idxVarValue */
        off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, idxVarValue, 0 /*offAddend*/,
                                                        IEMNATIVE_CALL_VOLATILE_GREG_MASK & ~IEMNATIVE_CALL_ARG1_GREG);
    }
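
    /* The swap case above rotates ARG1/ARG2 through ARG0, which is safe since
       ARG0 is only loaded with pVCpu afterwards. */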

    /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);

    /* Done setting up parameters, make the call. */
    off = iemNativeEmitCallImm(pReNative, off, pfnFunction);

    /* Restore variables and guest shadow registers to volatile registers. */
    off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
    off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, TlbState.getActiveRegsWithShadows());

#ifdef IEMNATIVE_WITH_TLB_LOOKUP
    if (!TlbState.fSkip)
    {
        /* end of TlbMiss - Jump to the done label. */
        uint32_t const idxLabelTlbDone = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbDone, UINT32_MAX, uTlbSeqNo);
        off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbDone);

        /*
         * TlbLookup:
         */
        off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMemAccess, cbMemAccess - 1,
                                           IEM_ACCESS_TYPE_WRITE, idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);

        /*
         * Emit code to do the actual storing / fetching.
         */
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 64);
# ifdef VBOX_WITH_STATISTICS
        off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, TlbState.idxReg1, TlbState.idxReg2,
                                                  RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTlbHitsForStack));
# endif
        if (idxRegValue != UINT8_MAX)
        {
            switch (cbMemAccess)
            {
                case 2:
                    off = iemNativeEmitStoreGpr16ByGprEx(pCodeBuf, off, idxRegValue, idxRegMemResult);
                    break;
                case 4:
                    if (!fIsIntelSeg)
                        off = iemNativeEmitStoreGpr32ByGprEx(pCodeBuf, off, idxRegValue, idxRegMemResult);
                    else
                    {
                        /* Intel real mode segment push: the 10890XE adds the 2nd half of EFLAGS
                           to a PUSH FS in real mode, so we have to try to emulate that here.
                           We borrow the now unused idxReg1 from the TLB lookup code here. */
                        uint8_t idxRegEfl = iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(pReNative, &off,
                                                                                            kIemNativeGstReg_EFlags);
                        if (idxRegEfl != UINT8_MAX)
                        {
#ifdef RT_ARCH_AMD64
                            off = iemNativeEmitLoadGprFromGpr32(pReNative, off, TlbState.idxReg1, idxRegEfl);
                            off = iemNativeEmitAndGpr32ByImm(pReNative, off, TlbState.idxReg1,
                                                             UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK);
#else
                            off = iemNativeEmitGpr32EqGprAndImmEx(iemNativeInstrBufEnsure(pReNative, off, 3),
                                                                  off, TlbState.idxReg1, idxRegEfl,
                                                                  UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK);
#endif
                            iemNativeRegFreeTmp(pReNative, idxRegEfl);
                        }
                        else
                        {
                            off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, TlbState.idxReg1,
                                                                  RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.eflags));
                            off = iemNativeEmitAndGpr32ByImm(pReNative, off, TlbState.idxReg1,
                                                             UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK);
                        }
                        /* ASSUMES the upper half of idxRegValue is ZERO. */
                        off = iemNativeEmitOrGpr32ByGpr(pReNative, off, TlbState.idxReg1, idxRegValue);
                        off = iemNativeEmitStoreGpr32ByGprEx(pCodeBuf, off, TlbState.idxReg1, idxRegMemResult);
                    }
                    break;
                case 8:
                    off = iemNativeEmitStoreGpr64ByGprEx(pCodeBuf, off, idxRegValue, idxRegMemResult);
                    break;
                default:
                    AssertFailed();
            }
        }
        else
        {
            switch (cbMemAccess)
            {
                case 2:
                    off = iemNativeEmitStoreImm16ByGprEx(pCodeBuf, off, (uint16_t)pVarValue->u.uValue,
                                                         idxRegMemResult, TlbState.idxReg1);
                    break;
                case 4:
                    Assert(!fIsSegReg);
                    off = iemNativeEmitStoreImm32ByGprEx(pCodeBuf, off, (uint32_t)pVarValue->u.uValue,
                                                         idxRegMemResult, TlbState.idxReg1);
                    break;
                case 8:
                    off = iemNativeEmitStoreImm64ByGprEx(pCodeBuf, off, pVarValue->u.uValue, idxRegMemResult, TlbState.idxReg1);
                    break;
                default:
                    AssertFailed();
            }
        }

        iemNativeRegFreeTmp(pReNative, idxRegMemResult);
        TlbState.freeRegsAndReleaseVars(pReNative);

        /*
         * TlbDone:
         *
         * Commit the new RSP value.
         */
        iemNativeLabelDefine(pReNative, idxLabelTlbDone, off);
    }
#endif /* IEMNATIVE_WITH_TLB_LOOKUP */

    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegRsp, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.rsp));
    iemNativeRegFreeTmp(pReNative, idxRegRsp);
    if (idxRegEffSp != idxRegRsp)
        iemNativeRegFreeTmp(pReNative, idxRegEffSp);

    /* The value variable is implicitly flushed. */
    if (idxRegValue != UINT8_MAX)
        iemNativeVarRegisterRelease(pReNative, idxVarValue);
    iemNativeVarFreeLocal(pReNative, idxVarValue);

    return off;
}



/* RT_MAKE_U32_FROM_U8(cBitsVar, cBitsFlat, 0, 0) */
#define IEM_MC_POP_GREG_U16(a_iGReg) \
    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(16, 0, 0, 0), \
                                    (uintptr_t)iemNativeHlpStackFetchU16, pCallEntry->idxInstr)
#define IEM_MC_POP_GREG_U32(a_iGReg) \
    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(32, 0, 0, 0), \
                                    (uintptr_t)iemNativeHlpStackFetchU32, pCallEntry->idxInstr)
#define IEM_MC_POP_GREG_U64(a_iGReg) \
    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(64, 0, 0, 0), \
                                    (uintptr_t)iemNativeHlpStackFetchU64, pCallEntry->idxInstr)

#define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) \
    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(16, 32, 0, 0), \
                                    (uintptr_t)iemNativeHlpStackFlatFetchU16, pCallEntry->idxInstr)
#define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) \
    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(32, 32, 0, 0), \
                                    (uintptr_t)iemNativeHlpStackFlatFetchU32, pCallEntry->idxInstr)

#define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) \
    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(16, 64, 0, 0), \
                                    (uintptr_t)iemNativeHlpStackFlatFetchU16, pCallEntry->idxInstr)
#define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) \
    off = iemNativeEmitStackPopGReg(pReNative, off, a_iGReg, RT_MAKE_U32_FROM_U8(64, 64, 0, 0), \
                                    (uintptr_t)iemNativeHlpStackFlatFetchU64, pCallEntry->idxInstr)


DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitStackPopUse16Sp(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t idxRegRsp, uint8_t idxRegEffSp, uint8_t cbMem,
                             uint8_t idxRegTmp)
{
    /* Use16BitSp: */
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadGprFromGpr16Ex(pCodeBuf, off, idxRegEffSp, idxRegRsp);
    off = iemNativeEmitAddGpr16ImmEx(pCodeBuf, off, idxRegRsp, cbMem); /* ASSUMES this does NOT modify bits [63:16]! */
    RT_NOREF(idxRegTmp);
#else
    /* ubfiz regeff, regrsp, #0, #16 - copies bits 15:0 from RSP to EffSp bits 15:0, zeroing bits 63:16. */
    pCodeBuf[off++] = Armv8A64MkInstrUbfiz(idxRegEffSp, idxRegRsp, 0, 16, false /*f64Bit*/);
    /* add tmp, regrsp, #cbMem */
    pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegTmp, idxRegRsp, cbMem, false /*f64Bit*/);
    /* and tmp, tmp, #0xffff */
    Assert(Armv8A64ConvertImmRImmS2Mask32(15, 0) == 0xffff);
    pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegTmp, idxRegTmp, 15, 0, false /*f64Bit*/);
    /* bfi regrsp, tmp, #0, #16 - moves bits 15:0 from tmp to RSP bits 15:0, keeping the other RSP bits as-is. */
    pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegRsp, idxRegTmp, 0, 16, false /*f64Bit*/);
#endif
    return off;
}
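
/* Note: unlike the push helpers, the effective (load) address here is the
   pre-increment SP, so the old value is copied to idxRegEffSp before RSP is
   advanced; the ARM64 variant needs idxRegTmp to build the wrapped 16-bit sum. */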


DECL_FORCE_INLINE(uint32_t)
iemNativeEmitStackPopUse32Sp(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t idxRegRsp, uint8_t idxRegEffSp, uint8_t cbMem)
{
    /* Use32BitSp: */
    off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, idxRegEffSp, idxRegRsp);
    off = iemNativeEmitAddGpr32ImmEx(pCodeBuf, off, idxRegRsp, cbMem);
    return off;
}

/** IEM_MC[|_FLAT32|_FLAT64]_POP_GREG_U16/32/64 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStackPopGReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxGReg,
                          uint32_t cBitsVarAndFlat, uintptr_t pfnFunction, uint8_t idxInstr)
{
    /*
     * Assert sanity.
     */
    Assert(idxGReg < 16);
#ifdef VBOX_STRICT
    if (RT_BYTE2(cBitsVarAndFlat) != 0)
    {
        Assert(   (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT);
        Assert(   pfnFunction
               == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatFetchU16
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatFetchU32
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatFetchU16
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatFetchU64
                   : UINT64_C(0xc000b000a0009000) ));
    }
    else
        Assert(   pfnFunction
               == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackFetchU16
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackFetchU32
                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackFetchU64
                   : UINT64_C(0xc000b000a0009000) ));
#endif

#ifdef VBOX_STRICT
    /*
     * Check that the fExec flags we've got make sense.
     */
    off = iemNativeEmitExecFlagsCheck(pReNative, off, pReNative->fExec);
#endif

    /*
     * To keep things simple we have to commit any pending writes first as we
     * may end up making calls.
     */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

    /*
     * Determine the effective stack pointer, for non-FLAT modes we also update RSP.
     * For FLAT modes we'll do this in TlbDone as we'll be using the incoming RSP
     * directly as the effective stack pointer.
     * (Code structure is very similar to that of PUSH)
     */
    uint8_t const cbMem           = RT_BYTE1(cBitsVarAndFlat) / 8;
    uint8_t const cBitsFlat       = RT_BYTE2(cBitsVarAndFlat); RT_NOREF(cBitsFlat);
    uint8_t const idxRegRsp       = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xSP),
                                                                    kIemNativeGstRegUse_ForUpdate, true /*fNoVolatileRegs*/);
    uint8_t const idxRegEffSp     = cBitsFlat != 0 ? idxRegRsp : iemNativeRegAllocTmp(pReNative, &off);
    /** @todo can do a better job picking the register here. For cbMem >= 4 this
     *        will be the resulting register value. */
    uint8_t const idxRegMemResult = iemNativeRegAllocTmp(pReNative, &off); /* pointer then value; arm64 SP += 2/4 helper too. */

    uint32_t      offFixupJumpToUseOtherBitSp = UINT32_MAX;
    if (cBitsFlat != 0)
    {
        Assert(idxRegEffSp == idxRegRsp);
        Assert(cBitsFlat == 32 || cBitsFlat == 64);
        Assert(IEM_F_MODE_X86_IS_FLAT(pReNative->fExec));
    }
    else /** @todo We can skip the test if we're targeting pre-386 CPUs. */
    {
        Assert(idxRegEffSp != idxRegRsp);
        uint8_t const idxRegSsAttr = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_ATTRIB(X86_SREG_SS),
                                                                     kIemNativeGstRegUse_ReadOnly);
#ifdef RT_ARCH_AMD64
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
#else
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
#endif
        off = iemNativeEmitTestAnyBitsInGpr32Ex(pCodeBuf, off, idxRegSsAttr, X86DESCATTR_D);
        iemNativeRegFreeTmp(pReNative, idxRegSsAttr);
        offFixupJumpToUseOtherBitSp = off;
        if ((pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT)
        {
            /** @todo can skip idxRegRsp updating when popping ESP. */
            off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off /*8-bit suffices*/, kIemNativeInstrCond_e); /* jump if zero */
            off = iemNativeEmitStackPopUse32Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem);
        }
        else
        {
            off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off /*8-bit suffices*/, kIemNativeInstrCond_ne); /* jump if not zero */
            off = iemNativeEmitStackPopUse16Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem, idxRegMemResult);
        }
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    }
    /* SpUpdateEnd: */
    uint32_t const offLabelSpUpdateEnd = off;
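
    /* Same two-path SP-width structure as the push emitter above: the fixup
       jump recorded here is patched to reach the out-of-line path below. */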

    /*
     * Okay, now prepare for TLB lookup and jump to code (or the TlbMiss if
     * we're skipping lookup).
     */
    uint8_t const  iSegReg           = cBitsFlat != 0 ? UINT8_MAX : X86_SREG_SS;
    IEMNATIVEEMITTLBSTATE const TlbState(pReNative, idxRegEffSp, &off, iSegReg, cbMem);
    uint16_t const uTlbSeqNo         = pReNative->uTlbSeqNo++;
    uint32_t const idxLabelTlbMiss   = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, UINT32_MAX, uTlbSeqNo);
    uint32_t const idxLabelTlbLookup = !TlbState.fSkip
                                     ? iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbLookup, UINT32_MAX, uTlbSeqNo)
                                     : UINT32_MAX;

    if (!TlbState.fSkip)
        off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbLookup); /** @todo short jump */
    else
        off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbMiss); /** @todo short jump */

    /*
     * Use16BitSp:
     */
    if (cBitsFlat == 0)
    {
#ifdef RT_ARCH_AMD64
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
#else
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
#endif
        iemNativeFixupFixedJump(pReNative, offFixupJumpToUseOtherBitSp, off);
        if ((pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT)
            off = iemNativeEmitStackPopUse16Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem, idxRegMemResult);
        else
            off = iemNativeEmitStackPopUse32Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem);
        off = iemNativeEmitJmpToFixedEx(pCodeBuf, off, offLabelSpUpdateEnd);
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    }

    /*
     * TlbMiss:
     *
     * Call helper to do the popping.
     */
    iemNativeLabelDefine(pReNative, idxLabelTlbMiss, off);

#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#else
    RT_NOREF(idxInstr);
#endif

    uint32_t const fHstRegsNotToSave = TlbState.getRegsNotToSave()
                                     | (idxRegMemResult < RT_ELEMENTS(pReNative->Core.aHstRegs) ? RT_BIT_32(idxRegMemResult) : 0)
                                     | (idxRegEffSp != idxRegRsp ? RT_BIT_32(idxRegEffSp) : 0);
    off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);


    /* IEMNATIVE_CALL_ARG1_GREG = EffSp/RSP */
    if (idxRegEffSp != IEMNATIVE_CALL_ARG1_GREG)
        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxRegEffSp);

    /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);

    /* Done setting up parameters, make the call. */
    off = iemNativeEmitCallImm(pReNative, off, pfnFunction);

    /* Move the return register content to idxRegMemResult. */
    if (idxRegMemResult != IEMNATIVE_CALL_RET_GREG)
        off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegMemResult, IEMNATIVE_CALL_RET_GREG);

    /* Restore variables and guest shadow registers to volatile registers. */
    off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
    off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, TlbState.getActiveRegsWithShadows());

#ifdef IEMNATIVE_WITH_TLB_LOOKUP
    if (!TlbState.fSkip)
    {
        /* end of TlbMiss - Jump to the done label. */
        uint32_t const idxLabelTlbDone = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbDone, UINT32_MAX, uTlbSeqNo);
        off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbDone);

        /*
         * TlbLookup:
         */
        off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, cbMem - 1, IEM_ACCESS_TYPE_READ,
                                           idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);

        /*
         * Emit code to load the value (from idxRegMemResult into idxRegMemResult).
         */
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
# ifdef VBOX_WITH_STATISTICS
        off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, TlbState.idxReg1, TlbState.idxReg2,
                                                  RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTlbHitsForStack));
# endif
        switch (cbMem)
        {
            case 2:
                off = iemNativeEmitLoadGprByGprU16Ex(pCodeBuf, off, idxRegMemResult, idxRegMemResult);
                break;
            case 4:
                off = iemNativeEmitLoadGprByGprU32Ex(pCodeBuf, off, idxRegMemResult, idxRegMemResult);
                break;
            case 8:
                off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, idxRegMemResult, idxRegMemResult);
                break;
            default:
                AssertFailed();
        }

        TlbState.freeRegsAndReleaseVars(pReNative);

        /*
         * TlbDone:
         *
         * Set the new RSP value (FLAT accesses need to calculate it first) and
         * commit the popped register value.
         */
        iemNativeLabelDefine(pReNative, idxLabelTlbDone, off);
    }
#endif /* IEMNATIVE_WITH_TLB_LOOKUP */

    if (idxGReg != X86_GREG_xSP)
    {
        /* Set the register. */
        if (cbMem >= sizeof(uint32_t))
        {
#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
            AssertMsg(   pReNative->idxCurCall == 0
                      || IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, IEMNATIVEGSTREG_GPR(idxGReg))),
                      ("%s - %u\n", g_aGstShadowInfo[idxGReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, IEMNATIVEGSTREG_GPR(idxGReg))));
#endif
            iemNativeRegClearAndMarkAsGstRegShadow(pReNative, idxRegMemResult, IEMNATIVEGSTREG_GPR(idxGReg), off);
            off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegMemResult,
                                                 RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[idxGReg]));
        }
        else
        {
            Assert(cbMem == sizeof(uint16_t));
            uint8_t const idxRegDst = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(idxGReg),
                                                                      kIemNativeGstRegUse_ForUpdate);
            off = iemNativeEmitGprMergeInGpr16(pReNative, off, idxRegDst, idxRegMemResult);
            off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegDst, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[idxGReg]));
            iemNativeRegFreeTmp(pReNative, idxRegDst);
        }

        /* Complete RSP calculation for FLAT mode. */
        if (idxRegEffSp == idxRegRsp)
        {
            if (cBitsFlat == 64)
                off = iemNativeEmitAddGprImm8(pReNative, off, idxRegRsp, sizeof(uint64_t));
            else
                off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxRegRsp, sizeof(uint32_t));
        }
    }
    else
    {
        /* We're popping RSP, ESP or SP. Only the 16-bit SP case requires a bit
           of extra work, of course. */
        if (cbMem == sizeof(uint64_t))
            off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegRsp, idxRegMemResult);
        else if (cbMem == sizeof(uint32_t))
            off = iemNativeEmitLoadGprFromGpr32(pReNative, off, idxRegRsp, idxRegMemResult);
        else
        {
            if (idxRegEffSp == idxRegRsp)
            {
                if (cBitsFlat == 64)
                    off = iemNativeEmitAddGprImm8(pReNative, off, idxRegRsp, sizeof(uint64_t));
                else
                    off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxRegRsp, sizeof(uint32_t));
            }
            off = iemNativeEmitGprMergeInGpr16(pReNative, off, idxRegRsp, idxRegMemResult);
        }
    }
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegRsp, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rsp));

    iemNativeRegFreeTmp(pReNative, idxRegRsp);
    if (idxRegEffSp != idxRegRsp)
        iemNativeRegFreeTmp(pReNative, idxRegEffSp);
    iemNativeRegFreeTmp(pReNative, idxRegMemResult);

    return off;
}



/*********************************************************************************************************************************
*   Memory mapping (IEM_MEM_MAP_XXX, IEM_MEM_FLAT_MAP_XXX).                                                                      *
*********************************************************************************************************************************/

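/* Convention used by the macros below: fAlignMask is the natural alignment
   mask, i.e. sizeof(type) - 1 (0 for byte accesses); the 80-bit float/BCD
   mappings use an 8-byte mask. */
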
#define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \
                                    IEM_ACCESS_DATA_ATOMIC, 0 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU8Atomic, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \
                                    IEM_ACCESS_DATA_RW, 0 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU8Rw, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \
                                    IEM_ACCESS_DATA_W, 0 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU8Wo, pCallEntry->idxInstr) \

#define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \
                                    IEM_ACCESS_DATA_R, 0 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU8Ro, pCallEntry->idxInstr)


#define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \
                                    IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU16Atomic, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \
                                    IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU16Rw, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \
                                    IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU16Wo, pCallEntry->idxInstr) \

#define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \
                                    IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU16Ro, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(int16_t), \
                                    IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU16Wo, pCallEntry->idxInstr) \


#define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \
                                    IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU32Atomic, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \
                                    IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU32Rw, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \
                                    IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU32Wo, pCallEntry->idxInstr) \

#define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \
                                    IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU32Ro, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(int32_t), \
                                    IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU32Wo, pCallEntry->idxInstr) \


#define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \
                                    IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU64Atomic, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \
                                    IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU64Rw, pCallEntry->idxInstr)
#define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \
                                    IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU64Wo, pCallEntry->idxInstr) \

#define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \
                                    IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU64Ro, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(int64_t), \
                                    IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU64Wo, pCallEntry->idxInstr) \


#define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTFLOAT80U), \
                                    IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataR80Wo, pCallEntry->idxInstr) \

#define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTFLOAT80U), \
                                    IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, /** @todo check BCD align */ \
                                    (uintptr_t)iemNativeHlpMemMapDataD80Wo, pCallEntry->idxInstr) \


#define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \
                                    IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU128Atomic, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \
                                    IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU128Rw, pCallEntry->idxInstr)

#define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \
                                    IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU128Wo, pCallEntry->idxInstr) \

#define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \
                                    IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemMapDataU128Ro, pCallEntry->idxInstr)

6164 |
|
---|
6165 | #define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
|
---|
6166 | off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \
|
---|
6167 | IEM_ACCESS_DATA_ATOMIC, 0 /*fAlignMask*/, \
|
---|
6168 | (uintptr_t)iemNativeHlpMemFlatMapDataU8Atomic, pCallEntry->idxInstr)
|
---|
6169 |
|
---|
6170 | #define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
|
---|
6171 | off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \
|
---|
6172 | IEM_ACCESS_DATA_RW, 0 /*fAlignMask*/, \
|
---|
6173 | (uintptr_t)iemNativeHlpMemFlatMapDataU8Rw, pCallEntry->idxInstr)
|
---|
6174 |
|
---|
6175 | #define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
|
---|
6176 | off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \
|
---|
6177 | IEM_ACCESS_DATA_W, 0 /*fAlignMask*/, \
|
---|
6178 | (uintptr_t)iemNativeHlpMemFlatMapDataU8Wo, pCallEntry->idxInstr) \
|
---|
6179 |
|
---|
6180 | #define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
|
---|
6181 | off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \
|
---|
6182 | IEM_ACCESS_DATA_R, 0 /*fAlignMask*/, \
|
---|
6183 | (uintptr_t)iemNativeHlpMemFlatMapDataU8Ro, pCallEntry->idxInstr)
|
---|
6184 |
|
---|
6185 |
|
---|
6186 | #define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
|
---|
6187 | off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \
|
---|
6188 | IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1 /*fAlignMask*/, \
|
---|
6189 | (uintptr_t)iemNativeHlpMemFlatMapDataU16Atomic, pCallEntry->idxInstr)
|
---|
6190 |
|
---|
6191 | #define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
|
---|
6192 | off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \
|
---|
6193 | IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1 /*fAlignMask*/, \
|
---|
6194 | (uintptr_t)iemNativeHlpMemFlatMapDataU16Rw, pCallEntry->idxInstr)
|
---|
6195 |
|
---|
6196 | #define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
|
---|
6197 | off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \
|
---|
6198 | IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask*/, \
|
---|
6199 | (uintptr_t)iemNativeHlpMemFlatMapDataU16Wo, pCallEntry->idxInstr) \
|
---|
6200 |
|
---|
6201 | #define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
|
---|
6202 | off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \
|
---|
6203 | IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1 /*fAlignMask*/, \
|
---|
6204 | (uintptr_t)iemNativeHlpMemFlatMapDataU16Ro, pCallEntry->idxInstr)
|
---|
6205 |
|
---|
6206 | #define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
|
---|
6207 | off = iemNativeEmitMemMapCommon(pReNative, off, a_pi16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(int16_t), \
|
---|
6208 | IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask*/, \
|
---|
6209 | (uintptr_t)iemNativeHlpMemFlatMapDataU16Wo, pCallEntry->idxInstr) \
|
---|
6210 |
|
---|
6211 |
|
---|
6212 | #define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
|
---|
6213 | off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \
|
---|
                                    IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU32Atomic, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \
                                    IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU32Rw, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \
                                    IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU32Wo, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \
                                    IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU32Ro, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pi32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(int32_t), \
                                    IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU32Wo, pCallEntry->idxInstr)


#define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \
                                    IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU64Atomic, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \
                                    IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU64Rw, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \
                                    IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU64Wo, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \
                                    IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU64Ro, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pi64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(int64_t), \
                                    IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU64Wo, pCallEntry->idxInstr)


#define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pr80Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTFLOAT80U), \
                                    IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataR80Wo, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pd80Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTFLOAT80U), \
                                    IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, /** @todo check BCD align */ \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataD80Wo, pCallEntry->idxInstr)


#define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \
                                    IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU128Atomic, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \
                                    IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU128Rw, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \
                                    IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU128Wo, pCallEntry->idxInstr)

#define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \
                                    IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \
                                    (uintptr_t)iemNativeHlpMemFlatMapDataU128Ro, pCallEntry->idxInstr)

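/**
 * Common emitter for the IEM_MC_MEM_MAP_XXX and IEM_MC_MEM_FLAT_MAP_XXX MCs
 * above (the flat variants pass iSegReg as UINT8_MAX).
 *
 * @returns New code buffer offset (longjmps on failure).
 * @param   pReNative       The native recompile state.
 * @param   off             Current code buffer offset.
 * @param   idxVarMem       Variable receiving the pointer to the mapping.
 * @param   idxVarUnmapInfo Variable receiving the unmap info byte.
 * @param   iSegReg         The segment register index, or UINT8_MAX.
 * @param   idxVarGCPtrMem  Variable holding the guest memory address.
 * @param   cbMem           The size of the access.
 * @param   fAccess         The IEM_ACCESS_XXX flags.
 * @param   fAlignMask      The alignment mask for the access.
 * @param   pfnFunction     The mapping helper to call on the TLB-miss path.
 * @param   idxInstr        The instruction number (for instruction counting).
 */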
DECL_INLINE_THROW(uint32_t)
iemNativeEmitMemMapCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarMem, uint8_t idxVarUnmapInfo,
                          uint8_t iSegReg, uint8_t idxVarGCPtrMem, uint8_t cbMem, uint32_t fAccess, uint8_t fAlignMask,
                          uintptr_t pfnFunction, uint8_t idxInstr)
{
    /*
     * Assert sanity.
     */
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarMem);
    PIEMNATIVEVAR const pVarMem = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarMem)];
    AssertStmt(   pVarMem->enmKind == kIemNativeVarKind_Invalid
               && pVarMem->cbVar == sizeof(void *),
               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));

    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarUnmapInfo);
    PIEMNATIVEVAR const pVarUnmapInfo = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarUnmapInfo)];
    AssertStmt(   pVarUnmapInfo->enmKind == kIemNativeVarKind_Invalid
               && pVarUnmapInfo->cbVar == sizeof(uint8_t),
               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));

    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarGCPtrMem);
    PIEMNATIVEVAR const pVarGCPtrMem = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarGCPtrMem)];
    AssertStmt(   pVarGCPtrMem->enmKind == kIemNativeVarKind_Immediate
               || pVarGCPtrMem->enmKind == kIemNativeVarKind_Stack,
               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));

    Assert(iSegReg < 6 || iSegReg == UINT8_MAX);

    AssertCompile(IEMNATIVE_CALL_ARG_GREG_COUNT >= 4);

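    /* The IEM_MAP_HLP_FN* macros below reconstruct the expected helper from the
       access flags so the Asserts can cross-check the pfnFunction the caller
       passed.  Assuming IEM_ACCESS_DATA_RW is the read+write data access flag
       combination, IEM_MAP_HLP_FN(IEM_ACCESS_DATA_RW, iemNativeHlpMemFlatMapDataU32)
       selects iemNativeHlpMemFlatMapDataU32Rw, for instance. */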
#ifdef VBOX_STRICT
# define IEM_MAP_HLP_FN_NO_AT(a_fAccess, a_fnBase) \
    (   ((a_fAccess) & (IEM_ACCESS_TYPE_MASK | IEM_ACCESS_ATOMIC)) == (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ) \
     ? (uintptr_t)RT_CONCAT(a_fnBase,Rw) \
     : ((a_fAccess) & (IEM_ACCESS_TYPE_MASK | IEM_ACCESS_ATOMIC)) == IEM_ACCESS_TYPE_READ \
     ? (uintptr_t)RT_CONCAT(a_fnBase,Ro) : (uintptr_t)RT_CONCAT(a_fnBase,Wo) )
# define IEM_MAP_HLP_FN(a_fAccess, a_fnBase) \
    (   ((a_fAccess) & (IEM_ACCESS_TYPE_MASK | IEM_ACCESS_ATOMIC)) == (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ | IEM_ACCESS_ATOMIC) \
     ? (uintptr_t)RT_CONCAT(a_fnBase,Atomic) \
     : IEM_MAP_HLP_FN_NO_AT(a_fAccess, a_fnBase) )

    if (iSegReg == UINT8_MAX)
    {
        Assert(   (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT);
        switch (cbMem)
        {
            case 1:  Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU8));  break;
            case 2:  Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU16)); break;
            case 4:  Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU32)); break;
            case 8:  Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU64)); break;
            case 10:
                Assert(   pfnFunction == (uintptr_t)iemNativeHlpMemFlatMapDataR80Wo
                       || pfnFunction == (uintptr_t)iemNativeHlpMemFlatMapDataD80Wo);
                Assert((fAccess & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
                break;
            case 16: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU128)); break;
# if 0
            case 32: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemFlatMapDataU256)); break;
            case 64: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemFlatMapDataU512)); break;
# endif
            default: AssertFailed(); break;
        }
    }
    else
    {
        Assert(iSegReg < 6);
        switch (cbMem)
        {
            case 1:  Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU8));  break;
            case 2:  Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU16)); break;
            case 4:  Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU32)); break;
            case 8:  Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU64)); break;
            case 10:
                Assert(   pfnFunction == (uintptr_t)iemNativeHlpMemMapDataR80Wo
                       || pfnFunction == (uintptr_t)iemNativeHlpMemMapDataD80Wo);
                Assert((fAccess & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
                break;
            case 16: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU128)); break;
# if 0
            case 32: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemMapDataU256)); break;
            case 64: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemMapDataU512)); break;
# endif
            default: AssertFailed(); break;
        }
    }
# undef IEM_MAP_HLP_FN
# undef IEM_MAP_HLP_FN_NO_AT
#endif

#ifdef VBOX_STRICT
    /*
     * Check that the fExec flags we've got make sense.
     */
    off = iemNativeEmitExecFlagsCheck(pReNative, off, pReNative->fExec);
#endif

    /*
     * To keep things simple we have to commit any pending writes first as we
     * may end up making calls.
     */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

#ifdef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
    /*
     * Move/spill/flush stuff out of call-volatile registers.
     * This is the easy way out. We could contain this to the tlb-miss branch
     * by saving and restoring active stuff here.
     */
    /** @todo save+restore active registers and maybe guest shadows in tlb-miss. */
    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */);
#endif

    /* The bUnmapInfo variable will get a register in the tlb-hit code path,
       while the tlb-miss codepath will temporarily put it on the stack.
       Set the type to stack here so we don't need to do it twice below. */
    iemNativeVarSetKindToStack(pReNative, idxVarUnmapInfo);
    uint8_t const idxRegUnmapInfo = iemNativeVarRegisterAcquire(pReNative, idxVarUnmapInfo, &off);
    /** @todo use a tmp register from TlbState, since they'll be free after tlb
     *        lookup is done. */

    /*
     * Define labels and allocate the result register (trying for the return
     * register if we can).
     */
    uint16_t const uTlbSeqNo         = pReNative->uTlbSeqNo++;
    uint8_t  const idxRegMemResult   = !(pReNative->Core.bmHstRegs & RT_BIT_32(IEMNATIVE_CALL_RET_GREG))
                                     ? iemNativeVarRegisterSetAndAcquire(pReNative, idxVarMem, IEMNATIVE_CALL_RET_GREG, &off)
                                     : iemNativeVarRegisterAcquire(pReNative, idxVarMem, &off);
    IEMNATIVEEMITTLBSTATE const TlbState(pReNative, &off, idxVarGCPtrMem, iSegReg, cbMem);
    uint32_t const idxLabelTlbLookup = !TlbState.fSkip
                                     ? iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbLookup, UINT32_MAX, uTlbSeqNo)
                                     : UINT32_MAX;
    //off = iemNativeEmitBrk(pReNative, off, 0);
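    /*
     * Note the code layout: straight-line code falls through into the TlbMiss
     * path emitted below, the TlbLookup code is emitted out-of-line further
     * down and jumps back to the TlbDone label on a hit, and both paths
     * continue from TlbDone.
     */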
    /*
     * Jump to the TLB lookup code.
     */
    if (!TlbState.fSkip)
        off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbLookup); /** @todo short jump */

    /*
     * TlbMiss:
     *
     * Call helper to do the fetching.
     * We flush all guest register shadow copies here.
     */
    uint32_t const idxLabelTlbMiss = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, off, uTlbSeqNo);

#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#else
    RT_NOREF(idxInstr);
#endif

#ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
    /* Save variables in volatile registers. */
    uint32_t const fHstRegsNotToSave = TlbState.getRegsNotToSave() | RT_BIT_32(idxRegMemResult) | RT_BIT_32(idxRegUnmapInfo);
    off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
#endif

    /* IEMNATIVE_CALL_ARG2_GREG = GCPtrMem - load first as it is from a variable. */
    off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, idxVarGCPtrMem, 0 /*cbAppend*/,
#ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
                                                    IEMNATIVE_CALL_VOLATILE_GREG_MASK, true /*fSpilledVarsInVolatileRegs*/);
#else
                                                    IEMNATIVE_CALL_VOLATILE_GREG_MASK);
#endif

    /* IEMNATIVE_CALL_ARG3_GREG = iSegReg */
    if (iSegReg != UINT8_MAX)
    {
        AssertStmt(iSegReg < 6, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_EMIT_BAD_SEG_REG_NO));
        off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, iSegReg);
    }

    /* IEMNATIVE_CALL_ARG1_GREG = &idxVarUnmapInfo; stackslot address, load any register with result after the call. */
    int32_t const offBpDispVarUnmapInfo = iemNativeStackCalcBpDisp(iemNativeVarGetStackSlot(pReNative, idxVarUnmapInfo));
    off = iemNativeEmitLeaGprByBp(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, offBpDispVarUnmapInfo);

    /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);

    /* Done setting up parameters, make the call. */
    off = iemNativeEmitCallImm(pReNative, off, pfnFunction);

    /*
     * Put the output in the right registers.
     */
    Assert(idxRegMemResult == pVarMem->idxReg);
    if (idxRegMemResult != IEMNATIVE_CALL_RET_GREG)
        off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegMemResult, IEMNATIVE_CALL_RET_GREG);

#ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
    /* Restore variables and guest shadow registers to volatile registers. */
    off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
    off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, TlbState.getActiveRegsWithShadows());
#endif

    Assert(pVarUnmapInfo->idxReg == idxRegUnmapInfo);
    off = iemNativeEmitLoadGprByBpU8(pReNative, off, idxRegUnmapInfo, offBpDispVarUnmapInfo);

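    /* When TlbState.fSkip is set, no lookup/done labels were created above and
       the helper call path we just emitted is taken unconditionally. */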
#ifdef IEMNATIVE_WITH_TLB_LOOKUP
    if (!TlbState.fSkip)
    {
        /* end of TlbMiss - Jump to the done label. */
        uint32_t const idxLabelTlbDone = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbDone, UINT32_MAX, uTlbSeqNo);
        off = iemNativeEmitJmpToLabel(pReNative, off, idxLabelTlbDone);

        /*
         * TlbLookup:
         */
        off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMask, fAccess,
                                           idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
# ifdef VBOX_WITH_STATISTICS
        off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
                                                RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTlbHitsForMapped));
# endif

        /* [idxVarUnmapInfo] = 0; */
        off = iemNativeEmitLoadGprImm32(pReNative, off, idxRegUnmapInfo, 0);

        /*
         * TlbDone:
         */
        iemNativeLabelDefine(pReNative, idxLabelTlbDone, off);

        TlbState.freeRegsAndReleaseVars(pReNative, idxVarGCPtrMem);

# ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
        /* Temp Hack: Flush all guest shadows in volatile registers in case of TLB miss. */
        iemNativeRegFlushGuestShadowsByHostMask(pReNative, IEMNATIVE_CALL_VOLATILE_GREG_MASK);
# endif
    }
#else
    RT_NOREF(fAccess, fAlignMask, idxLabelTlbMiss);
#endif

    iemNativeVarRegisterRelease(pReNative, idxVarUnmapInfo);
    iemNativeVarRegisterRelease(pReNative, idxVarMem);

    return off;
}


#define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) \
    off = iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_DATA_ATOMIC, \
                                         (uintptr_t)iemNativeHlpMemCommitAndUnmapAtomic, pCallEntry->idxInstr)

#define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) \
    off = iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_DATA_RW, \
                                         (uintptr_t)iemNativeHlpMemCommitAndUnmapRw, pCallEntry->idxInstr)

#define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) \
    off = iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_DATA_W, \
                                         (uintptr_t)iemNativeHlpMemCommitAndUnmapWo, pCallEntry->idxInstr)

#define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) \
    off = iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_DATA_R, \
                                         (uintptr_t)iemNativeHlpMemCommitAndUnmapRo, pCallEntry->idxInstr)

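/** Common emitter for the IEM_MC_MEM_COMMIT_AND_UNMAP_XXX MCs above.
 *
 * A typical MC block pairs one of the mapping MCs further up with the matching
 * commit MC, along these lines (illustrative sketch only, the variable names
 * are made up):
 * @code
 *      IEM_MC_MEM_FLAT_MAP_U32_RW(pu32Dst, bUnmapInfo, GCPtrMem);
 *      ... modify *pu32Dst ...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);
 * @endcode
 */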
DECL_INLINE_THROW(uint32_t)
iemNativeEmitMemCommitAndUnmap(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarUnmapInfo,
                               uint32_t fAccess, uintptr_t pfnFunction, uint8_t idxInstr)
{
    /*
     * Assert sanity.
     */
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarUnmapInfo);
#if defined(VBOX_STRICT) || defined(RT_ARCH_AMD64)
    PIEMNATIVEVAR const pVarUnmapInfo = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarUnmapInfo)];
#endif
    Assert(pVarUnmapInfo->enmKind == kIemNativeVarKind_Stack);
    Assert(   pVarUnmapInfo->idxReg       < RT_ELEMENTS(pReNative->Core.aHstRegs)
           || pVarUnmapInfo->idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS); /* must be initialized */
#ifdef VBOX_STRICT
    switch (fAccess & (IEM_ACCESS_TYPE_MASK | IEM_ACCESS_ATOMIC))
    {
        case IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_ATOMIC:
            Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapAtomic); break;
        case IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE:
            Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapRw); break;
        case IEM_ACCESS_TYPE_WRITE:
            Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapWo); break;
        case IEM_ACCESS_TYPE_READ:
            Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapRo); break;
        default: AssertFailed();
    }
#else
    RT_NOREF(fAccess);
#endif

    /*
     * To keep things simple we have to commit any pending writes first as we
     * may end up making calls (there shouldn't be any at this point, so this
     * is just for consistency).
     */
    /** @todo we could postpone this till we make the call and reload the
     *        registers after returning from the call. Not sure if that's
     *        sensible or not, though. */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

    /*
     * Move/spill/flush stuff out of call-volatile registers.
     *
     * We exclude any register holding the bUnmapInfo variable, as we'll be
     * checking it after returning from the call and will free it afterwards.
     */
    /** @todo save+restore active registers and maybe guest shadows in miss
     *        scenario. */
    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */,
                                                RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVarUnmapInfo)));

    /*
     * If idxVarUnmapInfo is zero, we can skip all this. Otherwise we'll have
     * to call the unmap helper function.
     *
     * The likelihood of it being zero is higher than for the TLB hit when doing
     * the mapping, as a TLB miss for a well-aligned and unproblematic memory
     * access should also end up with a mapping that won't need special unmapping.
     */
    /** @todo Go over iemMemMapJmp and implement the no-unmap-needed case! That
     *        should speed up things for the pure interpreter as well when TLBs
     *        are enabled. */
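    /* On AMD64 we can test the unmap info byte directly in its stack slot when
       it isn't live in a register: 0xf6 /0 ib is TEST r/m8, imm8, giving the
       'test byte [rbp - xxx], 0ffh' sequence emitted below. */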
#ifdef RT_ARCH_AMD64
    if (pVarUnmapInfo->idxReg == UINT8_MAX)
    {
        /* test byte [rbp - xxx], 0ffh  */
        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
        pbCodeBuf[off++] = 0xf6;
        uint8_t const idxStackSlot = pVarUnmapInfo->idxStackSlot;
        off = iemNativeEmitGprByBpDisp(pbCodeBuf, off, 0, iemNativeStackCalcBpDisp(idxStackSlot), pReNative);
        pbCodeBuf[off++] = 0xff;
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    }
    else
#endif
    {
        uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxVarUnmapInfo, &off,
                                                              true /*fInitialized*/, IEMNATIVE_CALL_ARG1_GREG /*idxRegPref*/);
        off = iemNativeEmitTestAnyBitsInGpr8(pReNative, off, idxVarReg, 0xff);
        iemNativeVarRegisterRelease(pReNative, idxVarUnmapInfo);
    }
    uint32_t const offJmpFixup = off;
    off = iemNativeEmitJzToFixed(pReNative, off, off /* ASSUME jz rel8 suffices */);

    /*
     * Call the unmap helper function.
     */
#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING /** @todo This should be unnecessary, the mapping call will already have set it! */
    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#else
    RT_NOREF(idxInstr);
#endif

    /* IEMNATIVE_CALL_ARG1_GREG = idxVarUnmapInfo (first!) */
    off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarUnmapInfo,
                                               0 /*offAddend*/, IEMNATIVE_CALL_VOLATILE_GREG_MASK);

    /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);

    /* Done setting up parameters, make the call. */
    off = iemNativeEmitCallImm(pReNative, off, pfnFunction);

    /* The bUnmapInfo variable is implicitly freed by these MCs. */
    iemNativeVarFreeLocal(pReNative, idxVarUnmapInfo);

    /*
     * Done, just fixup the jump for the non-call case.
     */
    iemNativeFixupFixedJump(pReNative, offJmpFixup, off);

    return off;
}



/*********************************************************************************************************************************
*   State and Exceptions                                                                                                         *
*********************************************************************************************************************************/

#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() off = iemNativeEmitPrepareFpuForUse(pReNative, off, true /*fForChange*/)
#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ()   off = iemNativeEmitPrepareFpuForUse(pReNative, off, false /*fForChange*/)

#define IEM_MC_PREPARE_SSE_USAGE()              off = iemNativeEmitPrepareFpuForUse(pReNative, off, true /*fForChange*/)
#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() off = iemNativeEmitPrepareFpuForUse(pReNative, off, true /*fForChange*/)
#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ()   off = iemNativeEmitPrepareFpuForUse(pReNative, off, false /*fForChange*/)

#define IEM_MC_PREPARE_AVX_USAGE()              off = iemNativeEmitPrepareFpuForUse(pReNative, off, true /*fForChange*/)
#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() off = iemNativeEmitPrepareFpuForUse(pReNative, off, true /*fForChange*/)
#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ()   off = iemNativeEmitPrepareFpuForUse(pReNative, off, false /*fForChange*/)


DECL_INLINE_THROW(uint32_t) iemNativeEmitPrepareFpuForUse(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool fForChange)
{
    /** @todo this needs a lot more work later. */
    RT_NOREF(pReNative, fForChange);
    return off;
}



/*********************************************************************************************************************************
*   Emitters for FPU related operations.                                                                                         *
*********************************************************************************************************************************/

#define IEM_MC_FETCH_FCW(a_u16Fcw) \
    off = iemNativeEmitFetchFpuFcw(pReNative, off, a_u16Fcw)

/** Emits code for IEM_MC_FETCH_FCW. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchFpuFcw(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint16_t));

    uint8_t const idxReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    /* Allocate a temporary FCW register. */
    /** @todo eliminate extra register */
    uint8_t const idxFcwReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_FpuFcw,
                                                              kIemNativeGstRegUse_ReadOnly);

    off = iemNativeEmitLoadGprFromGpr16(pReNative, off, idxReg, idxFcwReg);

    /* Free but don't flush the FCW register. */
    iemNativeRegFreeTmp(pReNative, idxFcwReg);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}


#define IEM_MC_FETCH_FSW(a_u16Fsw) \
    off = iemNativeEmitFetchFpuFsw(pReNative, off, a_u16Fsw)

/** Emits code for IEM_MC_FETCH_FSW. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitFetchFpuFsw(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint16_t));

    uint8_t const idxReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off, false /*fInitialized*/);
    /* Allocate a temporary FSW register. */
    /** @todo eliminate extra register */
    uint8_t const idxFswReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_FpuFsw,
                                                              kIemNativeGstRegUse_ReadOnly);

    off = iemNativeEmitLoadGprFromGpr16(pReNative, off, idxReg, idxFswReg);

    /* Free but don't flush the FSW register. */
    iemNativeRegFreeTmp(pReNative, idxFswReg);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}



#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR


/*********************************************************************************************************************************
*   Emitters for SSE/AVX specific operations.                                                                                    *
*********************************************************************************************************************************/
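/*
 * Most emitters in this section follow the same pattern: allocate a host SIMD
 * register for the guest register (stating how much of it to load and whether
 * it is read, updated or fully overwritten), acquire a host register for the
 * variable, emit the operation, mark the guest register dirty when written,
 * and finally free everything without flushing.
 */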

#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
    off = iemNativeEmitSimdCopyXregU128(pReNative, off, a_iXRegDst, a_iXRegSrc)

/** Emits code for IEM_MC_COPY_XREG_U128. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdCopyXregU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iXRegDst, uint8_t iXRegSrc)
{
    /* Allocate destination and source register. */
    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXRegDst),
                                                                          kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ForFullWrite);
    uint8_t const idxSimdRegSrc = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXRegSrc),
                                                                          kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ReadOnly);

    off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, idxSimdRegDst, idxSimdRegSrc);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iXRegDst);
    /* We don't need to write everything back here as the destination is marked as dirty and will be flushed automatically. */

    /* Free but don't flush the source and destination registers. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegSrc);

    return off;
}



#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg, a_iQWord) \
    off = iemNativeEmitSimdFetchXregU64(pReNative, off, a_u64Value, a_iXReg, a_iQWord)

/** Emits code for IEM_MC_FETCH_XREG_U64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdFetchXregU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iXReg, uint8_t iQWord)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint64_t));

    uint8_t const idxSimdRegSrc = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    off = iemNativeEmitSimdLoadGprFromVecRegU64(pReNative, off, idxVarReg, idxSimdRegSrc, iQWord);

    /* Free but don't flush the source register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegSrc);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}


#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg, a_iDWord) \
    off = iemNativeEmitSimdFetchXregU32(pReNative, off, a_u32Value, a_iXReg, a_iDWord)

/** Emits code for IEM_MC_FETCH_XREG_U32. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdFetchXregU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iXReg, uint8_t iDWord)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint32_t));

    uint8_t const idxSimdRegSrc = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    off = iemNativeEmitSimdLoadGprFromVecRegU32(pReNative, off, idxVarReg, idxSimdRegSrc, iDWord);

    /* Free but don't flush the source register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegSrc);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}


#define IEM_MC_FETCH_XREG_U16(a_u16Value, a_iXReg, a_iWord) \
    off = iemNativeEmitSimdFetchXregU16(pReNative, off, a_u16Value, a_iXReg, a_iWord)

/** Emits code for IEM_MC_FETCH_XREG_U16. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdFetchXregU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iXReg, uint8_t iWord)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint16_t));

    uint8_t const idxSimdRegSrc = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    off = iemNativeEmitSimdLoadGprFromVecRegU16(pReNative, off, idxVarReg, idxSimdRegSrc, iWord);

    /* Free but don't flush the source register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegSrc);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}


#define IEM_MC_FETCH_XREG_U8(a_u8Value, a_iXReg, a_iByte) \
    off = iemNativeEmitSimdFetchXregU8(pReNative, off, a_u8Value, a_iXReg, a_iByte)

/** Emits code for IEM_MC_FETCH_XREG_U8. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdFetchXregU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iXReg, uint8_t iByte)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint8_t));

    uint8_t const idxSimdRegSrc = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    off = iemNativeEmitSimdLoadGprFromVecRegU8(pReNative, off, idxVarReg, idxSimdRegSrc, iByte);

    /* Free but don't flush the source register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegSrc);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}



#define IEM_MC_STORE_XREG_U64(a_iXReg, a_iQWord, a_u64Value) \
    off = iemNativeEmitSimdStoreXregU64(pReNative, off, a_iXReg, a_u64Value, a_iQWord)

/** Emits code for IEM_MC_STORE_XREG_U64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdStoreXregU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iXReg, uint8_t idxDstVar, uint8_t iQWord)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint64_t));

    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ForUpdate);

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    off = iemNativeEmitSimdStoreGprToVecRegU64(pReNative, off, idxSimdRegDst, idxVarReg, iQWord);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iXReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}


#define IEM_MC_STORE_XREG_U32(a_iXReg, a_iDWord, a_u32Value) \
    off = iemNativeEmitSimdStoreXregU32(pReNative, off, a_iXReg, a_u32Value, a_iDWord)

/** Emits code for IEM_MC_STORE_XREG_U32. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdStoreXregU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iXReg, uint8_t idxDstVar, uint8_t iDWord)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint32_t));

    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ForUpdate);

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    off = iemNativeEmitSimdStoreGprToVecRegU32(pReNative, off, idxSimdRegDst, idxVarReg, iDWord);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iXReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}



#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
    off = iemNativeEmitSimdStoreXregU64ZxU128(pReNative, off, a_iXReg, a_u64Value)

/** Emits code for IEM_MC_STORE_XREG_U64_ZX_U128. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdStoreXregU64ZxU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iXReg, uint8_t idxDstVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint64_t));

    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ForUpdate);

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    /* Zero the vector register first, then store the 64-bit value to the low 64 bits. */
    off = iemNativeEmitSimdZeroVecRegLowU128(pReNative, off, idxSimdRegDst);
    off = iemNativeEmitSimdStoreGprToVecRegU64(pReNative, off, idxSimdRegDst, idxVarReg, 0);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iXReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}


#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
    off = iemNativeEmitSimdStoreXregU32ZxU128(pReNative, off, a_iXReg, a_u32Value)

/** Emits code for IEM_MC_STORE_XREG_U32_ZX_U128. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdStoreXregU32ZxU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iXReg, uint8_t idxDstVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint32_t));

    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ForUpdate);

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    /* Zero the vector register first, then store the 32-bit value to the lowest 32-bit element. */
    off = iemNativeEmitSimdZeroVecRegLowU128(pReNative, off, idxSimdRegDst);
    off = iemNativeEmitSimdStoreGprToVecRegU32(pReNative, off, idxSimdRegDst, idxVarReg, 0);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iXReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}


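/* For the YMM accesses below, qwords 0..1 (dwords 0..3) live in the low 128
   bits of the register and qwords 2..3 (dwords 4..7) in the high 128 bits,
   hence the load-size selection on iQWord/iDWord. */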
#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc, a_iQWord) \
    off = iemNativeEmitSimdFetchYregU64(pReNative, off, a_u64Dst, a_iYRegSrc, a_iQWord)

/** Emits code for IEM_MC_FETCH_YREG_U64. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdFetchYregU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iYReg, uint8_t iQWord)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint64_t));

    uint8_t const idxSimdRegSrc = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYReg),
                                                                          iQWord >= 2
                                                                          ? kIemNativeGstSimdRegLdStSz_High128
                                                                          : kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    off = iemNativeEmitSimdLoadGprFromVecRegU64(pReNative, off, idxVarReg, idxSimdRegSrc, iQWord);

    /* Free but don't flush the source register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegSrc);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}


#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
    off = iemNativeEmitSimdFetchYregU32(pReNative, off, a_u32Dst, a_iYRegSrc, 0)

/** Emits code for IEM_MC_FETCH_YREG_U32. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdFetchYregU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iYReg, uint8_t iDWord)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxDstVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxDstVar, sizeof(uint32_t));

    uint8_t const idxSimdRegSrc = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYReg),
                                                                          iDWord >= 4
                                                                          ? kIemNativeGstSimdRegLdStSz_High128
                                                                          : kIemNativeGstSimdRegLdStSz_Low128,
                                                                          kIemNativeGstRegUse_ReadOnly);

    iemNativeVarSetKindToStack(pReNative, idxDstVar);
    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxDstVar, &off);

    off = iemNativeEmitSimdLoadGprFromVecRegU32(pReNative, off, idxVarReg, idxSimdRegSrc, iDWord);

    /* Free but don't flush the source register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegSrc);
    iemNativeVarRegisterRelease(pReNative, idxDstVar);

    return off;
}


#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
    off = iemNativeEmitSimdClearYregHighU128(pReNative, off, a_iYReg)

/** Emits code for IEM_MC_CLEAR_YREG_128_UP. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdClearYregHighU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iYReg)
{
    uint8_t const idxSimdReg = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYReg),
                                                                       kIemNativeGstSimdRegLdStSz_High128,
                                                                       kIemNativeGstRegUse_ForFullWrite);

    off = iemNativeEmitSimdZeroVecRegHighU128(pReNative, off, idxSimdReg);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, iYReg);

    /* Free but don't flush the register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdReg);

    return off;
}

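/* The _ZX_VLMAX broadcasts and stores below zero the destination up to the
   maximum vector length, hence the 256-bit load size, the explicit zeroing of
   the parts not written, and the dirtying of both register halves. */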


#define IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX(a_iXRegDst, a_u32Src) \
    off = iemNativeEmitSimdBroadcastXregU32ZxVlmax(pReNative, off, a_iXRegDst, a_u32Src)

/** Emits code for IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdBroadcastXregU32ZxVlmax(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iXReg, uint8_t idxSrcVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxSrcVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxSrcVar, sizeof(uint32_t));

    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_256,
                                                                          kIemNativeGstRegUse_ForFullWrite);

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxSrcVar, &off);

    off = iemNativeEmitSimdBroadcastGprToVecRegU32(pReNative, off, idxSimdRegDst, idxVarReg, false /*f256Bit*/);
    off = iemNativeEmitSimdZeroVecRegHighU128(pReNative, off, idxSimdRegDst);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iXReg);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, iXReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxSrcVar);

    return off;
}


#define IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX(a_iXRegDst, a_u64Src) \
    off = iemNativeEmitSimdBroadcastXregU64ZxVlmax(pReNative, off, a_iXRegDst, a_u64Src)

/** Emits code for IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdBroadcastXregU64ZxVlmax(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iXReg, uint8_t idxSrcVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxSrcVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxSrcVar, sizeof(uint64_t));

    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_256,
                                                                          kIemNativeGstRegUse_ForFullWrite);

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxSrcVar, &off);

    off = iemNativeEmitSimdBroadcastGprToVecRegU64(pReNative, off, idxSimdRegDst, idxVarReg, false /*f256Bit*/);
    off = iemNativeEmitSimdZeroVecRegHighU128(pReNative, off, idxSimdRegDst);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iXReg);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, iXReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxSrcVar);

    return off;
}


#define IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
    off = iemNativeEmitSimdBroadcastYregU32ZxVlmax(pReNative, off, a_iYRegDst, a_u32Src)

/** Emits code for IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdBroadcastYregU32ZxVlmax(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iYReg, uint8_t idxSrcVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxSrcVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxSrcVar, sizeof(uint32_t));

    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYReg),
                                                                          kIemNativeGstSimdRegLdStSz_256,
                                                                          kIemNativeGstRegUse_ForFullWrite);

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxSrcVar, &off);

    off = iemNativeEmitSimdBroadcastGprToVecRegU32(pReNative, off, idxSimdRegDst, idxVarReg, true /*f256Bit*/);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iYReg);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, iYReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxSrcVar);

    return off;
}


#define IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
    off = iemNativeEmitSimdBroadcastYregU64ZxVlmax(pReNative, off, a_iYRegDst, a_u64Src)

/** Emits code for IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdBroadcastYregU64ZxVlmax(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iYReg, uint8_t idxSrcVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxSrcVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxSrcVar, sizeof(uint64_t));

    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYReg),
                                                                          kIemNativeGstSimdRegLdStSz_256,
                                                                          kIemNativeGstRegUse_ForFullWrite);

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxSrcVar, &off);

    off = iemNativeEmitSimdBroadcastGprToVecRegU64(pReNative, off, idxSimdRegDst, idxVarReg, true /*f256Bit*/);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iYReg);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, iYReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxSrcVar);

    return off;
}


#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
    off = iemNativeEmitSimdStoreYregU32ZxVlmax(pReNative, off, a_iYRegDst, a_u32Src)

/** Emits code for IEM_MC_STORE_YREG_U32_ZX_VLMAX. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdStoreYregU32ZxVlmax(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iYReg, uint8_t idxSrcVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxSrcVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxSrcVar, sizeof(uint32_t));

    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYReg),
                                                                          kIemNativeGstSimdRegLdStSz_256,
                                                                          kIemNativeGstRegUse_ForFullWrite);

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxSrcVar, &off);

    off = iemNativeEmitSimdZeroVecRegU256(pReNative, off, idxSimdRegDst);
    off = iemNativeEmitSimdStoreGprToVecRegU32(pReNative, off, idxSimdRegDst, idxVarReg, 0 /*iDWord*/);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iYReg);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, iYReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxSrcVar);

    return off;
}


#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
    off = iemNativeEmitSimdStoreYregU64ZxVlmax(pReNative, off, a_iYRegDst, a_u64Src)

/** Emits code for IEM_MC_STORE_YREG_U64_ZX_VLMAX. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdStoreYregU64ZxVlmax(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iYReg, uint8_t idxSrcVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxSrcVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxSrcVar, sizeof(uint64_t));

    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYReg),
                                                                          kIemNativeGstSimdRegLdStSz_256,
                                                                          kIemNativeGstRegUse_ForFullWrite);

    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxSrcVar, &off);

    off = iemNativeEmitSimdZeroVecRegU256(pReNative, off, idxSimdRegDst);
    off = iemNativeEmitSimdStoreGprToVecRegU64(pReNative, off, idxSimdRegDst, idxVarReg, 0 /*iQWord*/);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iYReg);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, iYReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxSrcVar);

    return off;
}


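/* Merge semantics: IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX yields
   dst[63:0] = u64Local and dst[127:64] = srcHx[127:64]; the U64LO_U64LOCAL
   variant yields dst[63:0] = srcHx[63:0] and dst[127:64] = u64Local.  Both
   zero the upper half of the destination. */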
7245 | #define IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
|
---|
7246 | off = iemNativeEmitSimdMergeYregU64LocalU64HiZxVlmax(pReNative, off, a_iYRegDst, a_u64Local, a_iYRegSrcHx)
|
---|
7247 |
|
---|
7248 | /** Emits code for IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX. */
|
---|
7249 | DECL_INLINE_THROW(uint32_t)
|
---|
7250 | iemNativeEmitSimdMergeYregU64LocalU64HiZxVlmax(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iYRegDst, uint8_t idxSrcVar, uint8_t iYRegSrcHx)
|
---|
7251 | {
|
---|
7252 | IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxSrcVar);
|
---|
7253 | IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxSrcVar, sizeof(uint64_t));
|
---|
7254 |
|
---|
7255 | uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYRegDst),
|
---|
7256 | kIemNativeGstSimdRegLdStSz_256, kIemNativeGstRegUse_ForFullWrite);
|
---|
7257 | uint8_t const idxSimdRegSrcHx = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYRegSrcHx),
|
---|
7258 | kIemNativeGstSimdRegLdStSz_Low128, kIemNativeGstRegUse_ReadOnly);
|
---|
7259 | uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxSrcVar, &off);
|
---|
7260 |
|
---|
7261 | off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, idxSimdRegDst, idxSimdRegSrcHx);
|
---|
7262 | off = iemNativeEmitSimdStoreGprToVecRegU64(pReNative, off, idxSimdRegDst, idxVarReg, 0 /*iQWord*/);
|
---|
7263 | off = iemNativeEmitSimdZeroVecRegHighU128(pReNative, off, idxSimdRegDst);
|
---|
7264 | IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iYRegDst);
|
---|
7265 | IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, iYRegDst);
|
---|
7266 |
|
---|
7267 | /* Free but don't flush the source and destination registers. */
|
---|
7268 | iemNativeSimdRegFreeTmp(pReNative, idxSimdRegSrcHx);
|
---|
7269 | iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
|
---|
7270 | iemNativeVarRegisterRelease(pReNative, idxSrcVar);
|
---|
7271 |
|
---|
7272 | return off;
|
---|
7273 | }
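
/*
 * Illustrative sketch only: the merge performed above, as plain C over
 * hypothetical RTUINT256U shadows (uYmmDst, uYmmSrcHx and u64Local are
 * placeholder names).  The local value lands in qword 0, qword 1 is taken
 * from the source register, and the upper half is zeroed:
 *
 * @code
 *     RTUINT256U uYmmDst, uYmmSrcHx;
 *     uYmmDst.au64[0] = u64Local;          // low qword from the local variable
 *     uYmmDst.au64[1] = uYmmSrcHx.au64[1]; // high qword of the low 128 bits kept
 *     uYmmDst.au64[2] = 0;                 // upper 128 bits zero-extended (VLMAX)
 *     uYmmDst.au64[3] = 0;
 * @endcode
 */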


#define IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX(a_iYRegDst, a_iYRegSrcHx, a_u64Local) \
    off = iemNativeEmitSimdMergeYregU64LoU64LocalZxVlmax(pReNative, off, a_iYRegDst, a_iYRegSrcHx, a_u64Local)

/** Emits code for IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdMergeYregU64LoU64LocalZxVlmax(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iYRegDst,
                                               uint8_t iYRegSrcHx, uint8_t idxSrcVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxSrcVar);
    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxSrcVar, sizeof(uint64_t));

    uint8_t const idxSimdRegDst   = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYRegDst),
                                                                            kIemNativeGstSimdRegLdStSz_256, kIemNativeGstRegUse_ForFullWrite);
    uint8_t const idxSimdRegSrcHx = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iYRegSrcHx),
                                                                            kIemNativeGstSimdRegLdStSz_Low128, kIemNativeGstRegUse_ReadOnly);
    uint8_t const idxVarReg       = iemNativeVarRegisterAcquire(pReNative, idxSrcVar, &off);

    off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, idxSimdRegDst, idxSimdRegSrcHx);
    off = iemNativeEmitSimdStoreGprToVecRegU64(pReNative, off, idxSimdRegDst, idxVarReg, 1 /*iQWord*/);
    off = iemNativeEmitSimdZeroVecRegHighU128(pReNative, off, idxSimdRegDst);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iYRegDst);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, iYRegDst);

    /* Free but don't flush the source and destination registers. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegSrcHx);
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);
    iemNativeVarRegisterRelease(pReNative, idxSrcVar);

    return off;
}
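
/*
 * Illustrative sketch only, mirroring the previous merge with the operands
 * swapped (placeholder names again): qword 0 is kept from the source
 * register, qword 1 comes from the local variable, and the upper half is
 * zeroed.
 *
 * @code
 *     RTUINT256U uYmmDst, uYmmSrcHx;
 *     uYmmDst.au64[0] = uYmmSrcHx.au64[0]; // low qword kept from the source
 *     uYmmDst.au64[1] = u64Local;          // high qword of the low 128 bits replaced
 *     uYmmDst.au64[2] = 0;                 // upper 128 bits zero-extended (VLMAX)
 *     uYmmDst.au64[3] = 0;
 * @endcode
 */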


#define IEM_MC_CLEAR_XREG_U32_MASK(a_iXReg, a_bMask) \
    off = iemNativeEmitSimdClearXregU32Mask(pReNative, off, a_iXReg, a_bMask)


/** Emits code for IEM_MC_CLEAR_XREG_U32_MASK. */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSimdClearXregU32Mask(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iXReg, uint8_t bImm8Mask)
{
    uint8_t const idxSimdRegDst = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(iXReg),
                                                                          kIemNativeGstSimdRegLdStSz_Low128, kIemNativeGstRegUse_ForUpdate);

    /** @todo r=aeichner For certain bit combinations we could reduce the number of emitted instructions. */
    if (bImm8Mask & RT_BIT(0))
        off = iemNativeEmitSimdZeroVecRegElemU32(pReNative, off, idxSimdRegDst, 0 /*iDWord*/);
    if (bImm8Mask & RT_BIT(1))
        off = iemNativeEmitSimdZeroVecRegElemU32(pReNative, off, idxSimdRegDst, 1 /*iDWord*/);
    if (bImm8Mask & RT_BIT(2))
        off = iemNativeEmitSimdZeroVecRegElemU32(pReNative, off, idxSimdRegDst, 2 /*iDWord*/);
    if (bImm8Mask & RT_BIT(3))
        off = iemNativeEmitSimdZeroVecRegElemU32(pReNative, off, idxSimdRegDst, 3 /*iDWord*/);
    IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, iXReg);

    /* Free but don't flush the destination register. */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdRegDst);

    return off;
}
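
/*
 * Illustrative sketch only: what the emitted code does to the XMM register,
 * as plain C over a hypothetical RTUINT128U shadow (uXmm is a placeholder
 * name).  Each set bit in the imm8 mask zeroes the corresponding dword, so
 * e.g. a mask of 0x5 clears dwords 0 and 2 and leaves dwords 1 and 3 intact.
 *
 * @code
 *     RTUINT128U uXmm;
 *     for (unsigned iDWord = 0; iDWord < 4; iDWord++)
 *         if (bImm8Mask & RT_BIT(iDWord))
 *             uXmm.au32[iDWord] = 0;
 * @endcode
 */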

#endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */


/*********************************************************************************************************************************
*   Include instruction emitters.                                                                                                *
*********************************************************************************************************************************/
#include "target-x86/IEMAllN8veEmit-x86.h"
