/* $Id: IEMAllN8veRecompiler.cpp 105877 2024-08-27 23:17:09Z vboxsync $ */
/** @file
 * IEM - Native Recompiler
 *
 * Logging group IEM_RE_NATIVE assignments:
 *      - Level 1  (Log)  : ...
 *      - Flow  (LogFlow) : ...
 *      - Level 2  (Log2) : Details calls as they're recompiled.
 *      - Level 3  (Log3) : Disassemble native code after recompiling.
 *      - Level 4  (Log4) : Delayed PC updating.
 *      - Level 5  (Log5) : ...
 *      - Level 6  (Log6) : ...
 *      - Level 7  (Log7) : ...
 *      - Level 8  (Log8) : ...
 *      - Level 9  (Log9) : ...
 *      - Level 10 (Log10): ...
 *      - Level 11 (Log11): Variable allocator.
 *      - Level 12 (Log12): Register allocator.
 */

/*
 * Copyright (C) 2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_NATIVE
#define IEM_WITH_OPAQUE_DECODER_STATE
#define VMCPU_INCL_CPUM_GST_CTX
#define VMM_INCLUDED_SRC_include_IEMMc_h /* block IEMMc.h inclusion. */
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/tm.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/string.h>
#if   defined(RT_ARCH_AMD64)
# include <iprt/x86.h>
#elif defined(RT_ARCH_ARM64)
# include <iprt/armv8.h>
#endif

#ifdef VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER
# include "/opt/local/include/capstone/capstone.h"
#endif

#include "IEMInline.h"
#include "IEMThreadedFunctions.h"
#include "IEMN8veRecompiler.h"
#include "IEMN8veRecompilerEmit.h"
#include "IEMN8veRecompilerTlbLookup.h"
#include "IEMNativeFunctions.h"


/*
 * Narrow down configs here to avoid wasting time on unused configs.
 * Note! Same checks in IEMAllThrdRecompiler.cpp.
 */

#ifndef IEM_WITH_CODE_TLB
# error The code TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_DATA_TLB
# error The data TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_SETJMP
# error The setjmp approach must be enabled for the recompiler.
#endif

/** @todo eliminate this clang build hack. */
#if RT_CLANG_PREREQ(4, 0)
# pragma GCC diagnostic ignored "-Wunused-function"
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
static void iemNativeDbgInfoAddLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, uint16_t uData);
#endif
DECL_FORCE_INLINE(void) iemNativeRegClearGstRegShadowing(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, uint32_t off);
DECL_FORCE_INLINE(void) iemNativeRegClearGstRegShadowingOne(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg,
                                                            IEMNATIVEGSTREG enmGstReg, uint32_t off);
DECL_INLINE_THROW(void) iemNativeVarRegisterRelease(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar);
static const char *iemNativeGetLabelName(IEMNATIVELABELTYPE enmLabel, bool fCommonCode = false);



/*********************************************************************************************************************************
*   Native Recompilation                                                                                                         *
*********************************************************************************************************************************/


/**
 * Used by TB code when encountering a non-zero status or rcPassUp after a call.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecStatusCodeFiddling,(PVMCPUCC pVCpu, int rc, uint8_t idxInstr))
{
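    /* Note: idxInstr is the number of instructions the TB completed before
       this call, so it is folded into the executed-instruction count here.
       VINF_IEM_REEXEC_BREAK merely means "return to the run loop" and is
       therefore mapped to VINF_SUCCESS before the usual status fiddling. */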
    pVCpu->iem.s.cInstructions += idxInstr;
    return VBOXSTRICTRC_VAL(iemExecStatusCodeFiddling(pVCpu, rc == VINF_IEM_REEXEC_BREAK ? VINF_SUCCESS : rc));
}


/**
 * Helper for iemNativeHlpReturnBreakViaLookup and iemNativeHlpReturnBreakViaLookupWithTlb.
 */
DECL_FORCE_INLINE(bool) iemNativeHlpReturnBreakViaLookupIsIrqOrForceFlagPending(PVMCPU pVCpu)
{
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    /** @todo this isn't even close to the NMI/IRQ conditions in EM. */
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) )) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return false;
    return true;
}


/**
 * Used by TB code to look up the next TB in the TB lookup table and jump
 * directly to it (direct linking), given the physical PC of the next
 * instruction.
 */
template <bool const a_fWithIrqCheck>
IEM_DECL_NATIVE_HLP_DEF(uintptr_t, iemNativeHlpReturnBreakViaLookup,(PVMCPUCC pVCpu, uint8_t idxTbLookup,
                                                                     uint32_t fFlags, RTGCPHYS GCPhysPc))
{
    PIEMTB const   pTb     = pVCpu->iem.s.pCurTbR3;
    Assert(idxTbLookup < pTb->cTbLookupEntries);
    PIEMTB * const ppNewTb = IEMTB_GET_TB_LOOKUP_TAB_ENTRY(pTb, idxTbLookup);
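/* Note: the direct-linking fast path below can be disabled by flipping this
   to '#if 0', in which case we only record the lookup table entry for the
   run loop and take the slow exit. */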
#if 1
    PIEMTB const pNewTb = *ppNewTb;
    if (pNewTb)
    {
# ifdef VBOX_STRICT
        uint64_t const uFlatPcAssert = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
        AssertMsg(   (uFlatPcAssert & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) == pVCpu->iem.s.uInstrBufPc
                  && (GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) == pVCpu->iem.s.GCPhysInstrBuf
                  && (GCPhysPc & GUEST_PAGE_OFFSET_MASK) == (uFlatPcAssert & GUEST_PAGE_OFFSET_MASK),
                  ("GCPhysPc=%RGp uFlatPcAssert=%#RX64 uInstrBufPc=%#RX64 GCPhysInstrBuf=%RGp\n",
                   GCPhysPc, uFlatPcAssert, pVCpu->iem.s.uInstrBufPc, pVCpu->iem.s.GCPhysInstrBuf));
# endif
        if (pNewTb->GCPhysPc == GCPhysPc)
        {
# ifdef VBOX_STRICT
            uint32_t fAssertFlags = (pVCpu->iem.s.fExec & IEMTB_F_IEM_F_MASK & IEMTB_F_KEY_MASK) | IEMTB_F_TYPE_NATIVE;
            if (pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW)
                fAssertFlags |= IEMTB_F_INHIBIT_SHADOW;
            if (pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_NMI)
                fAssertFlags |= IEMTB_F_INHIBIT_NMI;
#  if 1 /** @todo breaks on IP/EIP/RIP wraparound tests in bs3-cpu-weird-1. */
            Assert(IEM_F_MODE_X86_IS_FLAT(fFlags));
#  else
            if (!IEM_F_MODE_X86_IS_FLAT(fFlags))
            {
                int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
                if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
                    fAssertFlags |= IEMTB_F_CS_LIM_CHECKS;
            }
#  endif
            Assert(!(fFlags & ~(IEMTB_F_KEY_MASK | IEMTB_F_TYPE_MASK)));
            AssertMsg(fFlags == fAssertFlags, ("fFlags=%#RX32 fAssertFlags=%#RX32 cs:rip=%04x:%#010RX64\n",
                                               fFlags, fAssertFlags, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
# endif

            /*
             * Check them + type.
             */
            if ((pNewTb->fFlags & (IEMTB_F_KEY_MASK | IEMTB_F_TYPE_MASK)) == fFlags)
            {
                /*
                 * Check for interrupts and stuff.
                 */
                /** @todo We duplicate code here that's also in iemNativeHlpReturnBreakViaLookupWithTlb.
                 *        The main problems are the statistics and to some degree the logging. :/ */
                if (!a_fWithIrqCheck || !iemNativeHlpReturnBreakViaLookupIsIrqOrForceFlagPending(pVCpu))
                {
                    /* Do polling. */
                    if (   RT_LIKELY((int32_t)--pVCpu->iem.s.cTbsTillNextTimerPoll > 0)
                        || iemPollTimers(pVCpu->CTX_SUFF(pVM), pVCpu) == VINF_SUCCESS)
                    {
                        /*
                         * Success. Update statistics and switch to the next TB.
                         */
                        if (a_fWithIrqCheck)
                            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking1Irq);
                        else
                            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking1NoIrq);

                        pNewTb->cUsed                 += 1;
                        pNewTb->msLastUsed             = pVCpu->iem.s.msRecompilerPollNow;
                        pVCpu->iem.s.pCurTbR3          = pNewTb;
                        pVCpu->iem.s.ppTbLookupEntryR3 = IEMTB_GET_TB_LOOKUP_TAB_ENTRY(pNewTb, 0);
                        pVCpu->iem.s.cTbExecNative    += 1;
                        Log10(("iemNativeHlpReturnBreakViaLookupWithPc: match at %04x:%08RX64 (%RGp): pTb=%p[%#x]-> %p\n",
                               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysPc, pTb, idxTbLookup, pNewTb));
                        return (uintptr_t)pNewTb->Native.paInstructions;
                    }
                }
                Log10(("iemNativeHlpReturnBreakViaLookupWithPc: IRQ or FF pending\n"));
                STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking1PendingIrq);
            }
            else
            {
                Log10(("iemNativeHlpReturnBreakViaLookupWithPc: fFlags mismatch at %04x:%08RX64: %#x vs %#x (pTb=%p[%#x]-> %p)\n",
                       pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags, pNewTb->fFlags, pTb, idxTbLookup, pNewTb));
                STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchFlags);
            }
        }
        else
        {
            Log10(("iemNativeHlpReturnBreakViaLookupWithPc: GCPhysPc mismatch at %04x:%08RX64: %RGp vs %RGp (pTb=%p[%#x]-> %p)\n",
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysPc, pNewTb->GCPhysPc, pTb, idxTbLookup, pNewTb));
            STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchGCPhysPc);
        }
    }
    else
        STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking1NoTb);
#else
    NOREF(GCPhysPc);
#endif

    pVCpu->iem.s.ppTbLookupEntryR3 = ppNewTb;
    return 0;
}


/**
 * Same as iemNativeHlpReturnBreakViaLookup, except that a full code-TLB lookup
 * is done first to determine the physical PC of the next instruction.
 */
template <bool const a_fWithIrqCheck>
IEM_DECL_NATIVE_HLP_DEF(uintptr_t, iemNativeHlpReturnBreakViaLookupWithTlb,(PVMCPUCC pVCpu, uint8_t idxTbLookup))
{
    PIEMTB const   pTb     = pVCpu->iem.s.pCurTbR3;
    Assert(idxTbLookup < pTb->cTbLookupEntries);
    PIEMTB * const ppNewTb = IEMTB_GET_TB_LOOKUP_TAB_ENTRY(pTb, idxTbLookup);
#if 1
    PIEMTB const pNewTb = *ppNewTb;
    if (pNewTb)
    {
        /*
         * Calculate the flags for the next TB and check if they match.
         */
        uint32_t fFlags = (pVCpu->iem.s.fExec & IEMTB_F_IEM_F_MASK & IEMTB_F_KEY_MASK) | IEMTB_F_TYPE_NATIVE;
        if (!(pVCpu->cpum.GstCtx.rflags.uBoth & (CPUMCTX_INHIBIT_SHADOW | CPUMCTX_INHIBIT_NMI)))
        { /* likely */ }
        else
        {
            if (pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW)
                fFlags |= IEMTB_F_INHIBIT_SHADOW;
            if (pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_NMI)
                fFlags |= IEMTB_F_INHIBIT_NMI;
        }
        if (!IEM_F_MODE_X86_IS_FLAT(fFlags))
        {
            int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
            if (offFromLim >= X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
            { /* likely */ }
            else
                fFlags |= IEMTB_F_CS_LIM_CHECKS;
        }
        Assert(!(fFlags & ~(IEMTB_F_KEY_MASK | IEMTB_F_TYPE_MASK)));

        if ((pNewTb->fFlags & (IEMTB_F_KEY_MASK | IEMTB_F_TYPE_MASK)) == fFlags)
        {
            /*
             * Do the TLB lookup for flat RIP and compare the result with the next TB.
             *
             * Note! This replicates iemGetPcWithPhysAndCode and iemGetPcWithPhysAndCodeMissed.
             */
            /* Calc the effective PC. */
            uint64_t uPc = pVCpu->cpum.GstCtx.rip;
            Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu));
            uPc += pVCpu->cpum.GstCtx.cs.u64Base;

            /* Advance within the current buffer (PAGE) when possible. */
            RTGCPHYS GCPhysPc;
            uint64_t off;
            if (   pVCpu->iem.s.pbInstrBuf
                && (off = uPc - pVCpu->iem.s.uInstrBufPc) < pVCpu->iem.s.cbInstrBufTotal) /*ugly*/
            {
                pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
                pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
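                /* Expose at most 15 more bytes (the maximum x86 instruction
                   length), clamped to what remains of the instruction buffer. */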
                if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
                    pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
                else
                    pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
                GCPhysPc = pVCpu->iem.s.GCPhysInstrBuf + off;
            }
            else
            {
                pVCpu->iem.s.pbInstrBuf       = NULL;
                pVCpu->iem.s.offCurInstrStart = 0;
                pVCpu->iem.s.offInstrNextByte = 0;
                iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);
                GCPhysPc = pVCpu->iem.s.pbInstrBuf ? pVCpu->iem.s.GCPhysInstrBuf + pVCpu->iem.s.offCurInstrStart : NIL_RTGCPHYS;
            }

            if (pNewTb->GCPhysPc == GCPhysPc)
            {
                /*
                 * Check for interrupts and stuff.
                 */
                /** @todo We duplicate code here that's also in iemNativeHlpReturnBreakViaLookupWithPc.
                 *        The main problems are the statistics and to some degree the logging. :/ */
                if (!a_fWithIrqCheck || !iemNativeHlpReturnBreakViaLookupIsIrqOrForceFlagPending(pVCpu))
                {
                    /* Do polling. */
                    if (   RT_LIKELY((int32_t)--pVCpu->iem.s.cTbsTillNextTimerPoll > 0)
                        || iemPollTimers(pVCpu->CTX_SUFF(pVM), pVCpu) == VINF_SUCCESS)
                    {
                        /*
                         * Success. Update statistics and switch to the next TB.
                         */
                        if (a_fWithIrqCheck)
                            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking2Irq);
                        else
                            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking2NoIrq);

                        pNewTb->cUsed                 += 1;
                        pNewTb->msLastUsed             = pVCpu->iem.s.msRecompilerPollNow;
                        pVCpu->iem.s.pCurTbR3          = pNewTb;
                        pVCpu->iem.s.ppTbLookupEntryR3 = IEMTB_GET_TB_LOOKUP_TAB_ENTRY(pNewTb, 0);
                        pVCpu->iem.s.cTbExecNative    += 1;
                        Log10(("iemNativeHlpReturnBreakViaLookupWithTlb: match at %04x:%08RX64 (%RGp): pTb=%p[%#x]-> %p\n",
                               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysPc, pTb, idxTbLookup, pNewTb));
                        return (uintptr_t)pNewTb->Native.paInstructions;
                    }
                }
                Log10(("iemNativeHlpReturnBreakViaLookupWithTlb: IRQ or FF pending\n"));
                STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking2PendingIrq);
            }
            else
            {
                Log10(("iemNativeHlpReturnBreakViaLookupWithTlb: GCPhysPc mismatch at %04x:%08RX64: %RGp vs %RGp (pTb=%p[%#x]-> %p)\n",
                       pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysPc, pNewTb->GCPhysPc, pTb, idxTbLookup, pNewTb));
                STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchGCPhysPc);
            }
        }
        else
        {
            Log10(("iemNativeHlpReturnBreakViaLookupWithTlb: fFlags mismatch at %04x:%08RX64: %#x vs %#x (pTb=%p[%#x]-> %p)\n",
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags, pNewTb->fFlags, pTb, idxTbLookup, pNewTb));
            STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchFlags);
        }
    }
    else
        STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb);
#else
    NOREF(fFlags);
    STAM_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb); /* just for some stats, even if misleading */
#endif

    pVCpu->iem.s.ppTbLookupEntryR3 = ppNewTb;
    return 0;
}


/**
 * Used by TB code when it wants to raise a \#DE.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseDe,(PVMCPUCC pVCpu))
{
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitRaiseDe);
    iemRaiseDivideErrorJmp(pVCpu);
#ifndef _MSC_VER
    return VINF_IEM_RAISED_XCPT; /* not reached */
#endif
}


/**
 * Used by TB code when it wants to raise a \#UD.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseUd,(PVMCPUCC pVCpu))
{
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitRaiseUd);
    iemRaiseUndefinedOpcodeJmp(pVCpu);
#ifndef _MSC_VER
    return VINF_IEM_RAISED_XCPT; /* not reached */
#endif
}


/**
 * Used by TB code when it wants to raise an SSE related \#UD or \#NM.
 *
 * See IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseSseRelated,(PVMCPUCC pVCpu))
{
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitRaiseSseRelated);
    if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
        || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
        iemRaiseUndefinedOpcodeJmp(pVCpu);
    else
        iemRaiseDeviceNotAvailableJmp(pVCpu);
#ifndef _MSC_VER
    return VINF_IEM_RAISED_XCPT; /* not reached */
#endif
}


/**
 * Used by TB code when it wants to raise an AVX related \#UD or \#NM.
 *
 * See IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseAvxRelated,(PVMCPUCC pVCpu))
{
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated);
    if (   (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE)
        || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
        iemRaiseUndefinedOpcodeJmp(pVCpu);
    else
        iemRaiseDeviceNotAvailableJmp(pVCpu);
#ifndef _MSC_VER
    return VINF_IEM_RAISED_XCPT; /* not reached */
#endif
}


/**
 * Used by TB code when it wants to raise an SSE/AVX floating point exception related \#UD or \#XF.
 *
 * See IEM_MC_CALL_AVX_XXX/IEM_MC_CALL_SSE_XXX.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseSseAvxFpRelated,(PVMCPUCC pVCpu))
{
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated);
    if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXMMEEXCPT)
        iemRaiseSimdFpExceptionJmp(pVCpu);
    else
        iemRaiseUndefinedOpcodeJmp(pVCpu);
#ifndef _MSC_VER
    return VINF_IEM_RAISED_XCPT; /* not reached */
#endif
}


/**
 * Used by TB code when it wants to raise a \#NM.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseNm,(PVMCPUCC pVCpu))
{
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitRaiseNm);
    iemRaiseDeviceNotAvailableJmp(pVCpu);
#ifndef _MSC_VER
    return VINF_IEM_RAISED_XCPT; /* not reached */
#endif
}


/**
 * Used by TB code when it wants to raise a \#GP(0).
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseGp0,(PVMCPUCC pVCpu))
{
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitRaiseGp0);
    iemRaiseGeneralProtectionFault0Jmp(pVCpu);
#ifndef _MSC_VER
    return VINF_IEM_RAISED_XCPT; /* not reached */
#endif
}


/**
 * Used by TB code when it wants to raise a \#MF.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseMf,(PVMCPUCC pVCpu))
{
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitRaiseMf);
    iemRaiseMathFaultJmp(pVCpu);
#ifndef _MSC_VER
    return VINF_IEM_RAISED_XCPT; /* not reached */
#endif
}


/**
 * Used by TB code when it wants to raise a \#XF.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseXf,(PVMCPUCC pVCpu))
{
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitRaiseXf);
    iemRaiseSimdFpExceptionJmp(pVCpu);
#ifndef _MSC_VER
    return VINF_IEM_RAISED_XCPT; /* not reached */
#endif
}


/**
 * Used by TB code when detecting opcode changes.
 * @see iemThreadedFuncWorkerObsoleteTb
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpObsoleteTb,(PVMCPUCC pVCpu))
{
    /* We set fSafeToFree to false because we're being called in the context
       of a TB callback function, which for native TBs means we cannot release
       the executable memory till we've returned our way back to iemTbExec as
       that return path goes via the native code generated for the TB. */
    Log7(("TB obsolete: %p at %04x:%08RX64\n", pVCpu->iem.s.pCurTbR3, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeTbExitObsoleteTb);
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Used by TB code when we need to switch to a TB with CS.LIM checking.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpNeedCsLimChecking,(PVMCPUCC pVCpu))
{
    Log7(("TB need CS.LIM: %p at %04x:%08RX64; offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n",
          pVCpu->iem.s.pCurTbR3, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
          (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.rip,
          pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Used by TB code when we missed a PC check after a branch.
 */
IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpCheckBranchMiss,(PVMCPUCC pVCpu))
{
    Log7(("TB jmp miss: %p at %04x:%08RX64; GCPhysWithOffset=%RGp, pbInstrBuf=%p\n",
          pVCpu->iem.s.pCurTbR3, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
          pVCpu->iem.s.GCPhysInstrBuf + pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base - pVCpu->iem.s.uInstrBufPc,
          pVCpu->iem.s.pbInstrBuf));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses);
    return VINF_IEM_REEXEC_BREAK;
}



/*********************************************************************************************************************************
*   Helpers: Segmented memory fetches and stores.                                                                                *
*********************************************************************************************************************************/

/**
 * Used by TB code to load unsigned 8-bit data w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
{
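    /* Note: when IEMNATIVE_WITH_TLB_LOOKUP_FETCH is defined the emitted TB
       code does the data TLB lookup inline and only calls this helper on a
       miss, so we go straight to the safe (fallback) worker; otherwise the
       helper performs the whole access itself.  The same pattern applies to
       all the fetch and store helpers that follow. */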
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    return (uint64_t)iemMemFetchDataU8SafeJmp(pVCpu, iSegReg, GCPtrMem);
#else
    return (uint64_t)iemMemFetchDataU8Jmp(pVCpu, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to load signed 8-bit data w/ segmentation, sign extending it
 * to 16 bits.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU8_Sx_U16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
{
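    /* The cast chain below sign-extends the byte to 16 bits and then
       zero-extends the result to 64 bits, since TB code expects the value
       zero-extended in the 64-bit return register. */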
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    return (uint64_t)(uint16_t)(int16_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, iSegReg, GCPtrMem);
#else
    return (uint64_t)(uint16_t)(int16_t)(int8_t)iemMemFetchDataU8Jmp(pVCpu, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to load signed 8-bit data w/ segmentation, sign extending it
 * to 32 bits.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU8_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    return (uint64_t)(uint32_t)(int32_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, iSegReg, GCPtrMem);
#else
    return (uint64_t)(uint32_t)(int32_t)(int8_t)iemMemFetchDataU8Jmp(pVCpu, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to load signed 8-bit data w/ segmentation, sign extending it
 * to 64 bits.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU8_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    return (uint64_t)(int64_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, iSegReg, GCPtrMem);
#else
    return (uint64_t)(int64_t)(int8_t)iemMemFetchDataU8Jmp(pVCpu, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to load unsigned 16-bit data w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    return (uint64_t)iemMemFetchDataU16SafeJmp(pVCpu, iSegReg, GCPtrMem);
#else
    return (uint64_t)iemMemFetchDataU16Jmp(pVCpu, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to load signed 16-bit data w/ segmentation, sign extending it
 * to 32 bits.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU16_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    return (uint64_t)(uint32_t)(int32_t)(int16_t)iemMemFetchDataU16SafeJmp(pVCpu, iSegReg, GCPtrMem);
#else
    return (uint64_t)(uint32_t)(int32_t)(int16_t)iemMemFetchDataU16Jmp(pVCpu, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to load signed 16-bit data w/ segmentation, sign extending it
 * to 64 bits.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU16_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    return (uint64_t)(int64_t)(int16_t)iemMemFetchDataU16SafeJmp(pVCpu, iSegReg, GCPtrMem);
#else
    return (uint64_t)(int64_t)(int16_t)iemMemFetchDataU16Jmp(pVCpu, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to load unsigned 32-bit data w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    return (uint64_t)iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
#else
    return (uint64_t)iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to load signed 32-bit data w/ segmentation, sign extending it
 * to 64 bits.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU32_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    return (uint64_t)(int64_t)(int32_t)iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
#else
    return (uint64_t)(int64_t)(int32_t)iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to load unsigned 64-bit data w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    return iemMemFetchDataU64SafeJmp(pVCpu, iSegReg, GCPtrMem);
#else
    return iemMemFetchDataU64Jmp(pVCpu, iSegReg, GCPtrMem);
#endif
}


#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/**
 * Used by TB code to load 128-bit data w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFetchDataU128,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT128U pu128Dst))
{
# ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    iemMemFetchDataU128SafeJmp(pVCpu, pu128Dst, iSegReg, GCPtrMem);
# else
    iemMemFetchDataU128Jmp(pVCpu, pu128Dst, iSegReg, GCPtrMem);
# endif
}


/**
 * Used by TB code to load 128-bit data w/ segmentation, with SSE alignment
 * checks.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFetchDataU128AlignedSse,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT128U pu128Dst))
{
# ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    iemMemFetchDataU128AlignedSseSafeJmp(pVCpu, pu128Dst, iSegReg, GCPtrMem);
# else
    iemMemFetchDataU128AlignedSseJmp(pVCpu, pu128Dst, iSegReg, GCPtrMem);
# endif
}


/**
 * Used by TB code to load 128-bit data w/ segmentation, without alignment
 * checks.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFetchDataU128NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT128U pu128Dst))
{
# ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    iemMemFetchDataU128NoAcSafeJmp(pVCpu, pu128Dst, iSegReg, GCPtrMem);
# else
    iemMemFetchDataU128NoAcJmp(pVCpu, pu128Dst, iSegReg, GCPtrMem);
# endif
}


/**
 * Used by TB code to load 256-bit data w/ segmentation, without alignment
 * checks.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFetchDataU256NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT256U pu256Dst))
{
# ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    iemMemFetchDataU256NoAcSafeJmp(pVCpu, pu256Dst, iSegReg, GCPtrMem);
# else
    iemMemFetchDataU256NoAcJmp(pVCpu, pu256Dst, iSegReg, GCPtrMem);
# endif
}


/**
 * Used by TB code to load 256-bit data w/ segmentation, with AVX alignment
 * checks.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFetchDataU256AlignedAvx,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT256U pu256Dst))
{
# ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    iemMemFetchDataU256AlignedAvxSafeJmp(pVCpu, pu256Dst, iSegReg, GCPtrMem);
# else
    iemMemFetchDataU256AlignedAvxJmp(pVCpu, pu256Dst, iSegReg, GCPtrMem);
# endif
}
#endif


/**
 * Used by TB code to store unsigned 8-bit data w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint8_t u8Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU8SafeJmp(pVCpu, iSegReg, GCPtrMem, u8Value);
#else
    iemMemStoreDataU8Jmp(pVCpu, iSegReg, GCPtrMem, u8Value);
#endif
}


/**
 * Used by TB code to store unsigned 16-bit data w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint16_t u16Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU16SafeJmp(pVCpu, iSegReg, GCPtrMem, u16Value);
#else
    iemMemStoreDataU16Jmp(pVCpu, iSegReg, GCPtrMem, u16Value);
#endif
}


/**
 * Used by TB code to store unsigned 32-bit data w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint32_t u32Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem, u32Value);
#else
    iemMemStoreDataU32Jmp(pVCpu, iSegReg, GCPtrMem, u32Value);
#endif
}


/**
 * Used by TB code to store unsigned 64-bit data w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint64_t u64Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU64SafeJmp(pVCpu, iSegReg, GCPtrMem, u64Value);
#else
    iemMemStoreDataU64Jmp(pVCpu, iSegReg, GCPtrMem, u64Value);
#endif
}


#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/**
 * Used by TB code to store unsigned 128-bit data w/ segmentation, with SSE
 * alignment checks.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU128AlignedSse,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PCRTUINT128U pu128Src))
{
# ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU128AlignedSseSafeJmp(pVCpu, iSegReg, GCPtrMem, pu128Src);
# else
    iemMemStoreDataU128AlignedSseJmp(pVCpu, iSegReg, GCPtrMem, pu128Src);
# endif
}


/**
 * Used by TB code to store unsigned 128-bit data w/ segmentation, without
 * alignment checks.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU128NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PCRTUINT128U pu128Src))
{
# ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU128NoAcSafeJmp(pVCpu, iSegReg, GCPtrMem, pu128Src);
# else
    iemMemStoreDataU128NoAcJmp(pVCpu, iSegReg, GCPtrMem, pu128Src);
# endif
}


/**
 * Used by TB code to store unsigned 256-bit data w/ segmentation, without
 * alignment checks.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU256NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PCRTUINT256U pu256Src))
{
# ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU256NoAcSafeJmp(pVCpu, iSegReg, GCPtrMem, pu256Src);
# else
    iemMemStoreDataU256NoAcJmp(pVCpu, iSegReg, GCPtrMem, pu256Src);
# endif
}


/**
 * Used by TB code to store unsigned 256-bit data w/ segmentation, with AVX
 * alignment checks.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU256AlignedAvx,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PCRTUINT256U pu256Src))
{
# ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU256AlignedAvxSafeJmp(pVCpu, iSegReg, GCPtrMem, pu256Src);
# else
    iemMemStoreDataU256AlignedAvxJmp(pVCpu, iSegReg, GCPtrMem, pu256Src);
# endif
}
#endif



/**
 * Used by TB code to store an unsigned 16-bit value onto a generic stack.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
    iemMemStoreStackU16SafeJmp(pVCpu, GCPtrMem, u16Value);
#else
    iemMemStoreStackU16Jmp(pVCpu, GCPtrMem, u16Value);
#endif
}


/**
 * Used by TB code to store an unsigned 32-bit value onto a generic stack.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
    iemMemStoreStackU32SafeJmp(pVCpu, GCPtrMem, u32Value);
#else
    iemMemStoreStackU32Jmp(pVCpu, GCPtrMem, u32Value);
#endif
}


/**
 * Used by TB code to store a 32-bit selector value onto a generic stack.
 *
 * Intel CPUs don't write a whole dword, thus the special function.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU32SReg,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
    iemMemStoreStackU32SRegSafeJmp(pVCpu, GCPtrMem, u32Value);
#else
    iemMemStoreStackU32SRegJmp(pVCpu, GCPtrMem, u32Value);
#endif
}


/**
 * Used by TB code to push an unsigned 64-bit value onto a generic stack.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
    iemMemStoreStackU64SafeJmp(pVCpu, GCPtrMem, u64Value);
#else
    iemMemStoreStackU64Jmp(pVCpu, GCPtrMem, u64Value);
#endif
}


/**
 * Used by TB code to fetch an unsigned 16-bit item off a generic stack.
 */
IEM_DECL_NATIVE_HLP_DEF(uint16_t, iemNativeHlpStackFetchU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
    return iemMemFetchStackU16SafeJmp(pVCpu, GCPtrMem);
#else
    return iemMemFetchStackU16Jmp(pVCpu, GCPtrMem);
#endif
}


/**
 * Used by TB code to fetch an unsigned 32-bit item off a generic stack.
 */
IEM_DECL_NATIVE_HLP_DEF(uint32_t, iemNativeHlpStackFetchU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
    return iemMemFetchStackU32SafeJmp(pVCpu, GCPtrMem);
#else
    return iemMemFetchStackU32Jmp(pVCpu, GCPtrMem);
#endif
}


/**
 * Used by TB code to fetch an unsigned 64-bit item off a generic stack.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpStackFetchU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
    return iemMemFetchStackU64SafeJmp(pVCpu, GCPtrMem);
#else
    return iemMemFetchStackU64Jmp(pVCpu, GCPtrMem);
#endif
}



/*********************************************************************************************************************************
*   Helpers: Flat memory fetches and stores.                                                                                     *
*********************************************************************************************************************************/

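/* Note: in the IEMNATIVE_WITH_TLB_LOOKUP_* configurations the flat helpers
   below reuse the segmented safe workers, passing UINT8_MAX as iSegReg to
   indicate a flat (already linear) address. */
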
984 | /**
|
---|
985 | * Used by TB code to load unsigned 8-bit data w/ flat address.
|
---|
986 | * @note Zero extending the value to 64-bit to simplify assembly.
|
---|
987 | */
|
---|
988 | IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
|
---|
989 | {
|
---|
990 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
991 | return (uint64_t)iemMemFetchDataU8SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
|
---|
992 | #else
|
---|
993 | return (uint64_t)iemMemFlatFetchDataU8Jmp(pVCpu, GCPtrMem);
|
---|
994 | #endif
|
---|
995 | }
|
---|
996 |
|
---|
997 |
|
---|
998 | /**
|
---|
999 | * Used by TB code to load signed 8-bit data w/ flat address, sign extending it
|
---|
1000 | * to 16 bits.
|
---|
1001 | * @note Zero extending the value to 64-bit to simplify assembly.
|
---|
1002 | */
|
---|
1003 | IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU8_Sx_U16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
|
---|
1004 | {
|
---|
1005 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1006 | return (uint64_t)(uint16_t)(int16_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
|
---|
1007 | #else
|
---|
1008 | return (uint64_t)(uint16_t)(int16_t)(int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, GCPtrMem);
|
---|
1009 | #endif
|
---|
1010 | }
|
---|
1011 |
|
---|
1012 |
|
---|
1013 | /**
|
---|
1014 | * Used by TB code to load signed 8-bit data w/ flat address, sign extending it
|
---|
1015 | * to 32 bits.
|
---|
1016 | * @note Zero extending the value to 64-bit to simplify assembly.
|
---|
1017 | */
|
---|
1018 | IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU8_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
|
---|
1019 | {
|
---|
1020 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1021 | return (uint64_t)(uint32_t)(int32_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
|
---|
1022 | #else
|
---|
1023 | return (uint64_t)(uint32_t)(int32_t)(int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, GCPtrMem);
|
---|
1024 | #endif
|
---|
1025 | }
|
---|
1026 |
|
---|
1027 |
|
---|
1028 | /**
|
---|
1029 | * Used by TB code to load signed 8-bit data w/ flat address, sign extending it
|
---|
1030 | * to 64 bits.
|
---|
1031 | */
|
---|
1032 | IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU8_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
|
---|
1033 | {
|
---|
1034 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1035 | return (uint64_t)(int64_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
|
---|
1036 | #else
|
---|
1037 | return (uint64_t)(int64_t)(int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, GCPtrMem);
|
---|
1038 | #endif
|
---|
1039 | }
|
---|
1040 |
|
---|
1041 |
|
---|
1042 | /**
|
---|
1043 | * Used by TB code to load unsigned 16-bit data w/ flat address.
|
---|
1044 | * @note Zero extending the value to 64-bit to simplify assembly.
|
---|
1045 | */
|
---|
1046 | IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
|
---|
1047 | {
|
---|
1048 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1049 | return (uint64_t)iemMemFetchDataU16SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
|
---|
1050 | #else
|
---|
1051 | return (uint64_t)iemMemFlatFetchDataU16Jmp(pVCpu, GCPtrMem);
|
---|
1052 | #endif
|
---|
1053 | }
|
---|
1054 |
|
---|
1055 |
|
---|
1056 | /**
|
---|
1057 | * Used by TB code to load signed 16-bit data w/ flat address, sign extending it
|
---|
1058 | * to 32 bits.
|
---|
1059 | * @note Zero extending the value to 64-bit to simplify assembly.
|
---|
1060 | */
|
---|
1061 | IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU16_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
|
---|
1062 | {
|
---|
1063 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1064 | return (uint64_t)(uint32_t)(int32_t)(int16_t)iemMemFetchDataU16SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
|
---|
1065 | #else
|
---|
1066 | return (uint64_t)(uint32_t)(int32_t)(int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, GCPtrMem);
|
---|
1067 | #endif
|
---|
1068 | }
|
---|
1069 |
|
---|
1070 |
|
---|
1071 | /**
|
---|
1072 | * Used by TB code to load signed 16-bit data w/ flat address, sign extending it
|
---|
1073 | * to 64 bits.
|
---|
1074 | * @note Zero extending the value to 64-bit to simplify assembly.
|
---|
1075 | */
|
---|
1076 | IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU16_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
|
---|
1077 | {
|
---|
1078 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1079 | return (uint64_t)(int64_t)(int16_t)iemMemFetchDataU16SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
|
---|
1080 | #else
|
---|
1081 | return (uint64_t)(int64_t)(int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, GCPtrMem);
|
---|
1082 | #endif
|
---|
1083 | }
|
---|
1084 |
|
---|
1085 |
|
---|
1086 | /**
|
---|
1087 | * Used by TB code to load unsigned 32-bit data w/ flat address.
|
---|
1088 | * @note Zero extending the value to 64-bit to simplify assembly.
|
---|
1089 | */
|
---|
1090 | IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
|
---|
1091 | {
|
---|
1092 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1093 | return (uint64_t)iemMemFetchDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
|
---|
1094 | #else
|
---|
1095 | return (uint64_t)iemMemFlatFetchDataU32Jmp(pVCpu, GCPtrMem);
|
---|
1096 | #endif
|
---|
1097 | }
|
---|
1098 |
|
---|
1099 |
|
---|
1100 | /**
|
---|
1101 | * Used by TB code to load signed 32-bit data w/ flat address, sign extending it
|
---|
1102 | * to 64 bits.
|
---|
1103 | * @note Zero extending the value to 64-bit to simplify assembly.
|
---|
1104 | */
|
---|
1105 | IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU32_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
|
---|
1106 | {
|
---|
1107 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1108 | return (uint64_t)(int64_t)(int32_t)iemMemFetchDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
|
---|
1109 | #else
|
---|
1110 | return (uint64_t)(int64_t)(int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, GCPtrMem);
|
---|
1111 | #endif
|
---|
1112 | }
|
---|
1113 |
|
---|
1114 |
|
---|
1115 | /**
|
---|
1116 | * Used by TB code to load unsigned 64-bit data w/ flat address.
|
---|
1117 | */
|
---|
1118 | IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
|
---|
1119 | {
|
---|
1120 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1121 | return iemMemFetchDataU64SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
|
---|
1122 | #else
|
---|
1123 | return iemMemFlatFetchDataU64Jmp(pVCpu, GCPtrMem);
|
---|
1124 | #endif
|
---|
1125 | }
|
---|
1126 |
|
---|
1127 |
|
---|
1128 | #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
|
---|
1129 | /**
|
---|
1130 | * Used by TB code to load unsigned 128-bit data w/ flat address.
|
---|
1131 | */
|
---|
1132 | IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatFetchDataU128,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT128U pu128Dst))
|
---|
1133 | {
|
---|
1134 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1135 | return iemMemFetchDataU128SafeJmp(pVCpu, pu128Dst, UINT8_MAX, GCPtrMem);
|
---|
1136 | #else
|
---|
1137 | return iemMemFlatFetchDataU128Jmp(pVCpu, pu128Dst, GCPtrMem);
|
---|
1138 | #endif
|
---|
1139 | }
|
---|
1140 |
|
---|
1141 |
|
---|
1142 | /**
|
---|
1143 | * Used by TB code to load unsigned 128-bit data w/ flat address.
|
---|
1144 | */
|
---|
1145 | IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatFetchDataU128AlignedSse,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT128U pu128Dst))
|
---|
1146 | {
|
---|
1147 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1148 | return iemMemFetchDataU128AlignedSseSafeJmp(pVCpu, pu128Dst, UINT8_MAX, GCPtrMem);
|
---|
1149 | #else
|
---|
1150 | return iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, pu128Dst, GCPtrMem);
|
---|
1151 | #endif
|
---|
1152 | }
|
---|
1153 |
|
---|
1154 |
|
---|
1155 | /**
|
---|
1156 | * Used by TB code to load unsigned 128-bit data w/ flat address.
|
---|
1157 | */
|
---|
1158 | IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatFetchDataU128NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT128U pu128Dst))
|
---|
1159 | {
|
---|
1160 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1161 | return iemMemFetchDataU128NoAcSafeJmp(pVCpu, pu128Dst, UINT8_MAX, GCPtrMem);
|
---|
1162 | #else
|
---|
1163 | return iemMemFlatFetchDataU128NoAcJmp(pVCpu, pu128Dst, GCPtrMem);
|
---|
1164 | #endif
|
---|
1165 | }
|
---|
1166 |
|
---|
1167 |
|
---|
1168 | /**
|
---|
1169 | * Used by TB code to load unsigned 256-bit data w/ flat address.
|
---|
1170 | */
|
---|
1171 | IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatFetchDataU256NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT256U pu256Dst))
|
---|
1172 | {
|
---|
1173 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1174 | return iemMemFetchDataU256NoAcSafeJmp(pVCpu, pu256Dst, UINT8_MAX, GCPtrMem);
|
---|
1175 | #else
|
---|
1176 | return iemMemFlatFetchDataU256NoAcJmp(pVCpu, pu256Dst, GCPtrMem);
|
---|
1177 | #endif
|
---|
1178 | }
|
---|
1179 |
|
---|
1180 |
|
---|
1181 | /**
|
---|
1182 | * Used by TB code to load unsigned 256-bit data w/ flat address.
|
---|
1183 | */
|
---|
1184 | IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatFetchDataU256AlignedAvx,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT256U pu256Dst))
|
---|
1185 | {
|
---|
1186 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
|
---|
1187 | return iemMemFetchDataU256AlignedAvxSafeJmp(pVCpu, pu256Dst, UINT8_MAX, GCPtrMem);
|
---|
1188 | #else
|
---|
1189 | return iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, pu256Dst, GCPtrMem);
|
---|
1190 | #endif
|
---|
1191 | }
|
---|
1192 | #endif
|
---|
1193 |
|
---|
1194 |
|
---|
1195 | /**
|
---|
1196 | * Used by TB code to store unsigned 8-bit data w/ flat address.
|
---|
1197 | */
|
---|
1198 | IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t u8Value))
|
---|
1199 | {
|
---|
1200 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
|
---|
1201 | iemMemStoreDataU8SafeJmp(pVCpu, UINT8_MAX, GCPtrMem, u8Value);
|
---|
1202 | #else
|
---|
1203 | iemMemFlatStoreDataU8Jmp(pVCpu, GCPtrMem, u8Value);
|
---|
1204 | #endif
|
---|
1205 | }
|
---|
1206 |
|
---|
1207 |
|
---|
1208 | /**
|
---|
1209 | * Used by TB code to store unsigned 16-bit data w/ flat address.
|
---|
1210 | */
|
---|
1211 | IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value))
|
---|
1212 | {
|
---|
1213 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
|
---|
1214 | iemMemStoreDataU16SafeJmp(pVCpu, UINT8_MAX, GCPtrMem, u16Value);
|
---|
1215 | #else
|
---|
1216 | iemMemFlatStoreDataU16Jmp(pVCpu, GCPtrMem, u16Value);
|
---|
1217 | #endif
|
---|
1218 | }
|
---|
1219 |
|
---|
1220 |
|
---|
1221 | /**
|
---|
1222 | * Used by TB code to store unsigned 32-bit data w/ flat address.
|
---|
1223 | */
|
---|
1224 | IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
|
---|
1225 | {
|
---|
1226 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
|
---|
1227 | iemMemStoreDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem, u32Value);
|
---|
1228 | #else
|
---|
1229 | iemMemFlatStoreDataU32Jmp(pVCpu, GCPtrMem, u32Value);
|
---|
1230 | #endif
|
---|
1231 | }
|
---|
1232 |
|
---|
1233 |
|
---|
1234 | /**
|
---|
1235 | * Used by TB code to store unsigned 64-bit data w/ flat address.
|
---|
1236 | */
|
---|
1237 | IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value))
|
---|
1238 | {
|
---|
1239 | #ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
|
---|
1240 | iemMemStoreDataU64SafeJmp(pVCpu, UINT8_MAX, GCPtrMem, u64Value);
|
---|
1241 | #else
|
---|
1242 | iemMemFlatStoreDataU64Jmp(pVCpu, GCPtrMem, u64Value);
|
---|
1243 | #endif
|
---|
1244 | }
|
---|
1245 |
|
---|
1246 |
|
---|
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/**
 * Used by TB code to store unsigned 128-bit data w/ flat address, enforcing
 * SSE alignment restrictions.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU128AlignedSse,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PCRTUINT128U pu128Src))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU128AlignedSseSafeJmp(pVCpu, UINT8_MAX, GCPtrMem, pu128Src);
#else
    iemMemFlatStoreDataU128AlignedSseJmp(pVCpu, GCPtrMem, pu128Src);
#endif
}


/**
 * Used by TB code to store unsigned 128-bit data w/ flat address, without
 * alignment checking.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU128NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PCRTUINT128U pu128Src))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU128NoAcSafeJmp(pVCpu, UINT8_MAX, GCPtrMem, pu128Src);
#else
    iemMemFlatStoreDataU128NoAcJmp(pVCpu, GCPtrMem, pu128Src);
#endif
}


/**
 * Used by TB code to store unsigned 256-bit data w/ flat address, without
 * alignment checking.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU256NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PCRTUINT256U pu256Src))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU256NoAcSafeJmp(pVCpu, UINT8_MAX, GCPtrMem, pu256Src);
#else
    iemMemFlatStoreDataU256NoAcJmp(pVCpu, GCPtrMem, pu256Src);
#endif
}


/**
 * Used by TB code to store unsigned 256-bit data w/ flat address, enforcing
 * AVX alignment restrictions.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU256AlignedAvx,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PCRTUINT256U pu256Src))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
    iemMemStoreDataU256AlignedAvxSafeJmp(pVCpu, UINT8_MAX, GCPtrMem, pu256Src);
#else
    iemMemFlatStoreDataU256AlignedAvxJmp(pVCpu, GCPtrMem, pu256Src);
#endif
}
#endif



/**
 * Used by TB code to store an unsigned 16-bit value onto a flat stack.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
    iemMemStoreStackU16SafeJmp(pVCpu, GCPtrMem, u16Value);
#else
    iemMemFlatStoreStackU16Jmp(pVCpu, GCPtrMem, u16Value);
#endif
}


/**
 * Used by TB code to store an unsigned 32-bit value onto a flat stack.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
    iemMemStoreStackU32SafeJmp(pVCpu, GCPtrMem, u32Value);
#else
    iemMemFlatStoreStackU32Jmp(pVCpu, GCPtrMem, u32Value);
#endif
}


/**
 * Used by TB code to store a segment selector value onto a flat stack.
 *
 * Intel CPUs don't write a whole dword here, thus the special function.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU32SReg,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
    iemMemStoreStackU32SRegSafeJmp(pVCpu, GCPtrMem, u32Value);
#else
    iemMemFlatStoreStackU32SRegJmp(pVCpu, GCPtrMem, u32Value);
#endif
}


/**
 * Used by TB code to store an unsigned 64-bit value onto a flat stack.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
    iemMemStoreStackU64SafeJmp(pVCpu, GCPtrMem, u64Value);
#else
    iemMemFlatStoreStackU64Jmp(pVCpu, GCPtrMem, u64Value);
#endif
}

/**
 * Used by TB code to fetch an unsigned 16-bit item off a flat stack.
 */
IEM_DECL_NATIVE_HLP_DEF(uint16_t, iemNativeHlpStackFlatFetchU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
    return iemMemFetchStackU16SafeJmp(pVCpu, GCPtrMem);
#else
    return iemMemFlatFetchStackU16Jmp(pVCpu, GCPtrMem);
#endif
}


/**
 * Used by TB code to fetch an unsigned 32-bit item off a flat stack.
 */
IEM_DECL_NATIVE_HLP_DEF(uint32_t, iemNativeHlpStackFlatFetchU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
    return iemMemFetchStackU32SafeJmp(pVCpu, GCPtrMem);
#else
    return iemMemFlatFetchStackU32Jmp(pVCpu, GCPtrMem);
#endif
}


/**
 * Used by TB code to fetch an unsigned 64-bit item off a flat stack.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpStackFlatFetchU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
    return iemMemFetchStackU64SafeJmp(pVCpu, GCPtrMem);
#else
    return iemMemFlatFetchStackU64Jmp(pVCpu, GCPtrMem);
#endif
}


/*********************************************************************************************************************************
*   Helpers: Segmented memory mapping.                                                                                           *
*********************************************************************************************************************************/
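
/*
 * Note on the mapping helpers in this and the next section: TB code uses
 * them in map/commit pairs.  Each helper returns a host pointer to the guest
 * data and fills *pbUnmapInfo with a cookie that must later be passed to the
 * matching iemNativeHlpMemCommitAndUnmap* helper (see further down) to
 * commit the access and release the mapping.
 */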

/**
 * Used by TB code to map unsigned 8-bit data for atomic read-write w/
 * segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemMapDataU8Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                   RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU8AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU8AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 8-bit data read-write w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemMapDataU8Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                               RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU8RwSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU8RwJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 8-bit data writeonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemMapDataU8Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                               RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU8WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU8WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 8-bit data readonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint8_t const *, iemNativeHlpMemMapDataU8Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                     RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU8RoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU8RoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 16-bit data for atomic read-write w/
 * segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemMapDataU16Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                     RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU16AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU16AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 16-bit data read-write w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemMapDataU16Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                 RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU16RwSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU16RwJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 16-bit data writeonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemMapDataU16Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                 RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU16WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU16WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 16-bit data readonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint16_t const *, iemNativeHlpMemMapDataU16Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                       RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU16RoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU16RoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 32-bit data for atomic read-write w/
 * segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemMapDataU32Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                     RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU32AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU32AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 32-bit data read-write w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemMapDataU32Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                 RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU32RwSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU32RwJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 32-bit data writeonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemMapDataU32Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                 RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU32WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU32WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 32-bit data readonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint32_t const *, iemNativeHlpMemMapDataU32Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                       RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU32RoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU32RoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 64-bit data for atomic read-write w/
 * segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemMapDataU64Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                     RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU64AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU64AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 64-bit data read-write w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemMapDataU64Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                 RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU64RwSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU64RwJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 64-bit data writeonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemMapDataU64Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                 RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU64WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU64WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 64-bit data readonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t const *, iemNativeHlpMemMapDataU64Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                       RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU64RoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU64RoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map 80-bit float data writeonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(RTFLOAT80U *, iemNativeHlpMemMapDataR80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                   RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataR80WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataR80WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map 80-bit BCD data writeonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(RTPBCD80U *, iemNativeHlpMemMapDataD80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                  RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataD80WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataD80WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 128-bit data for atomic read-write w/
 * segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemMapDataU128Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                        RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU128AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU128AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 128-bit data read-write w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemMapDataU128Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                    RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU128RwSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU128RwJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 128-bit data writeonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemMapDataU128Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                    RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU128WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU128WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 128-bit data readonly w/ segmentation.
 */
IEM_DECL_NATIVE_HLP_DEF(RTUINT128U const *, iemNativeHlpMemMapDataU128Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                                          RTGCPTR GCPtrMem, uint8_t iSegReg))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU128RoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#else
    return iemMemMapDataU128RoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
#endif
}


/*********************************************************************************************************************************
*   Helpers: Flat memory mapping.                                                                                                *
*********************************************************************************************************************************/

/**
 * Used by TB code to map unsigned 8-bit data for atomic read-write w/ flat
 * address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemFlatMapDataU8Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU8AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU8AtJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 8-bit data read-write w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemFlatMapDataU8Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU8RwSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU8RwJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 8-bit data writeonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemFlatMapDataU8Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU8WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU8WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 8-bit data readonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint8_t const *, iemNativeHlpMemFlatMapDataU8Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU8RoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU8RoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 16-bit data for atomic read-write w/ flat
 * address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemFlatMapDataU16Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU16AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU16AtJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 16-bit data read-write w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemFlatMapDataU16Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU16RwSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU16RwJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 16-bit data writeonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemFlatMapDataU16Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU16WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU16WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 16-bit data readonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint16_t const *, iemNativeHlpMemFlatMapDataU16Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU16RoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU16RoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 32-bit data for atomic read-write w/ flat
 * address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemFlatMapDataU32Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU32AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU32AtJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 32-bit data read-write w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemFlatMapDataU32Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU32RwSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU32RwJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 32-bit data writeonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemFlatMapDataU32Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU32WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU32WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 32-bit data readonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint32_t const *, iemNativeHlpMemFlatMapDataU32Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU32RoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU32RoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 64-bit data for atomic read-write w/ flat
 * address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemFlatMapDataU64Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU64AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU64AtJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 64-bit data read-write w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemFlatMapDataU64Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU64RwSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU64RwJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 64-bit data writeonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemFlatMapDataU64Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU64WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU64WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 64-bit data readonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(uint64_t const *, iemNativeHlpMemFlatMapDataU64Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU64RoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU64RoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map 80-bit float data writeonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(RTFLOAT80U *, iemNativeHlpMemFlatMapDataR80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataR80WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataR80WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map 80-bit BCD data writeonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(RTPBCD80U *, iemNativeHlpMemFlatMapDataD80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataD80WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataD80WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 128-bit data for atomic read-write w/ flat
 * address.
 */
IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemFlatMapDataU128Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU128AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU128AtJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 128-bit data read-write w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemFlatMapDataU128Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU128RwSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU128RwJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 128-bit data writeonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemFlatMapDataU128Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU128WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU128WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/**
 * Used by TB code to map unsigned 128-bit data readonly w/ flat address.
 */
IEM_DECL_NATIVE_HLP_DEF(RTUINT128U const *, iemNativeHlpMemFlatMapDataU128Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
{
#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    return iemMemMapDataU128RoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
#else
    return iemMemFlatMapDataU128RoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
#endif
}


/*********************************************************************************************************************************
*   Helpers: Commit, rollback & unmap                                                                                            *
*********************************************************************************************************************************/
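
/*
 * These helpers take the bUnmapInfo cookie produced by one of the map
 * helpers above and hand it to the matching *SafeJmp worker, which commits
 * any dirty data (nothing in the read-only case) and releases the mapping.
 */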

/**
 * Used by TB code to commit and unmap an atomic read-write memory mapping.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCommitAndUnmapAtomic,(PVMCPUCC pVCpu, uint8_t bUnmapInfo))
{
    return iemMemCommitAndUnmapAtSafeJmp(pVCpu, bUnmapInfo);
}


/**
 * Used by TB code to commit and unmap a read-write memory mapping.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCommitAndUnmapRw,(PVMCPUCC pVCpu, uint8_t bUnmapInfo))
{
    return iemMemCommitAndUnmapRwSafeJmp(pVCpu, bUnmapInfo);
}


/**
 * Used by TB code to commit and unmap a write-only memory mapping.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCommitAndUnmapWo,(PVMCPUCC pVCpu, uint8_t bUnmapInfo))
{
    return iemMemCommitAndUnmapWoSafeJmp(pVCpu, bUnmapInfo);
}


/**
 * Used by TB code to commit and unmap a read-only memory mapping.
 */
IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCommitAndUnmapRo,(PVMCPUCC pVCpu, uint8_t bUnmapInfo))
{
    return iemMemCommitAndUnmapRoSafeJmp(pVCpu, bUnmapInfo);
}

/**
 * Reinitializes the native recompiler state.
 *
 * Called before starting a new recompile job.
 */
static PIEMRECOMPILERSTATE iemNativeReInit(PIEMRECOMPILERSTATE pReNative, PCIEMTB pTb)
{
    pReNative->cLabels                 = 0;
    pReNative->bmLabelTypes            = 0;
    pReNative->cFixups                 = 0;
#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
    pReNative->cTbExitFixups           = 0;
#endif
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    pReNative->pDbgInfo->cEntries      = 0;
    pReNative->pDbgInfo->offNativeLast = UINT32_MAX;
#endif
    pReNative->pTbOrg                  = pTb;
    pReNative->cCondDepth              = 0;
    pReNative->uCondSeqNo              = 0;
    pReNative->uCheckIrqSeqNo          = 0;
    pReNative->uTlbSeqNo               = 0;

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    pReNative->Core.offPc              = 0;
# if defined(IEMNATIVE_WITH_TB_DEBUG_INFO) || defined(VBOX_WITH_STATISTICS)
    pReNative->idxInstrPlusOneOfLastPcUpdate = 0;
# endif
# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
    pReNative->Core.fDebugPcInitialized = false;
# endif
#endif
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    pReNative->fSimdRaiseXcptChecksEmitted = 0;
#endif
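    /* Initialize the host GPR allocation bitmap: the fixed registers start
       out taken, and when the host has fewer than 32 GPRs the
       ~(RT_BIT(count) - 1U) term also marks the non-existent register
       indices as taken so they can never be handed out. */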
    pReNative->Core.bmHstRegs              = IEMNATIVE_REG_FIXED_MASK
#if IEMNATIVE_HST_GREG_COUNT < 32
                                           | ~(RT_BIT(IEMNATIVE_HST_GREG_COUNT) - 1U)
#endif
                                           ;
    pReNative->Core.bmHstRegsWithGstShadow = 0;
    pReNative->Core.bmGstRegShadows        = 0;
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    pReNative->Core.bmGstRegShadowDirty    = 0;
#endif
    pReNative->Core.bmVars                 = 0;
    pReNative->Core.bmStack                = 0;
    AssertCompile(sizeof(pReNative->Core.bmStack) * 8 == IEMNATIVE_FRAME_VAR_SLOTS); /* Must set reserved slots to 1 otherwise. */
    pReNative->Core.u64ArgVars             = UINT64_MAX;

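    /* Single-instance label types (those below
       kIemNativeLabelType_FirstWithMultipleInstances) have their label index
       cached here so lookups need not scan paLabels; UINT32_MAX means the
       label hasn't been created yet (see iemNativeLabelCreate and
       iemNativeLabelFind below). */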
    AssertCompile(RT_ELEMENTS(pReNative->aidxUniqueLabels) == 23);
    pReNative->aidxUniqueLabels[0]  = UINT32_MAX;
    pReNative->aidxUniqueLabels[1]  = UINT32_MAX;
    pReNative->aidxUniqueLabels[2]  = UINT32_MAX;
    pReNative->aidxUniqueLabels[3]  = UINT32_MAX;
    pReNative->aidxUniqueLabels[4]  = UINT32_MAX;
    pReNative->aidxUniqueLabels[5]  = UINT32_MAX;
    pReNative->aidxUniqueLabels[6]  = UINT32_MAX;
    pReNative->aidxUniqueLabels[7]  = UINT32_MAX;
    pReNative->aidxUniqueLabels[8]  = UINT32_MAX;
    pReNative->aidxUniqueLabels[9]  = UINT32_MAX;
    pReNative->aidxUniqueLabels[10] = UINT32_MAX;
    pReNative->aidxUniqueLabels[11] = UINT32_MAX;
    pReNative->aidxUniqueLabels[12] = UINT32_MAX;
    pReNative->aidxUniqueLabels[13] = UINT32_MAX;
    pReNative->aidxUniqueLabels[14] = UINT32_MAX;
    pReNative->aidxUniqueLabels[15] = UINT32_MAX;
    pReNative->aidxUniqueLabels[16] = UINT32_MAX;
    pReNative->aidxUniqueLabels[17] = UINT32_MAX;
    pReNative->aidxUniqueLabels[18] = UINT32_MAX;
    pReNative->aidxUniqueLabels[19] = UINT32_MAX;
    pReNative->aidxUniqueLabels[20] = UINT32_MAX;
    pReNative->aidxUniqueLabels[21] = UINT32_MAX;
    pReNative->aidxUniqueLabels[22] = UINT32_MAX;

    pReNative->idxLastCheckIrqCallNo = UINT32_MAX;

    /* Full host register reinit: */
    for (unsigned i = 0; i < RT_ELEMENTS(pReNative->Core.aHstRegs); i++)
    {
        pReNative->Core.aHstRegs[i].fGstRegShadows = 0;
        pReNative->Core.aHstRegs[i].enmWhat        = kIemNativeWhat_Invalid;
        pReNative->Core.aHstRegs[i].idxVar         = UINT8_MAX;
    }

    uint32_t fRegs = IEMNATIVE_REG_FIXED_MASK
                   & ~(  RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU)
#ifdef IEMNATIVE_REG_FIXED_PCPUMCTX
                       | RT_BIT_32(IEMNATIVE_REG_FIXED_PCPUMCTX)
#endif
#ifdef IEMNATIVE_REG_FIXED_TMP0
                       | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0)
#endif
#ifdef IEMNATIVE_REG_FIXED_TMP1
                       | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP1)
#endif
#ifdef IEMNATIVE_REG_FIXED_PC_DBG
                       | RT_BIT_32(IEMNATIVE_REG_FIXED_PC_DBG)
#endif
                      );
    for (uint32_t idxReg = ASMBitFirstSetU32(fRegs) - 1; fRegs != 0; idxReg = ASMBitFirstSetU32(fRegs) - 1)
    {
        fRegs &= ~RT_BIT_32(idxReg);
        pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_FixedReserved;
    }


    pReNative->Core.aHstRegs[IEMNATIVE_REG_FIXED_PVMCPU].enmWhat   = kIemNativeWhat_pVCpuFixed;
#ifdef IEMNATIVE_REG_FIXED_PCPUMCTX
    pReNative->Core.aHstRegs[IEMNATIVE_REG_FIXED_PCPUMCTX].enmWhat = kIemNativeWhat_pCtxFixed;
#endif
#ifdef IEMNATIVE_REG_FIXED_TMP0
    pReNative->Core.aHstRegs[IEMNATIVE_REG_FIXED_TMP0].enmWhat     = kIemNativeWhat_FixedTmp;
#endif
#ifdef IEMNATIVE_REG_FIXED_TMP1
    pReNative->Core.aHstRegs[IEMNATIVE_REG_FIXED_TMP1].enmWhat     = kIemNativeWhat_FixedTmp;
#endif
#ifdef IEMNATIVE_REG_FIXED_PC_DBG
    pReNative->Core.aHstRegs[IEMNATIVE_REG_FIXED_PC_DBG].enmWhat   = kIemNativeWhat_PcShadow;
#endif

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    pReNative->Core.bmHstSimdRegs              = IEMNATIVE_SIMD_REG_FIXED_MASK
# if IEMNATIVE_HST_SIMD_REG_COUNT < 32
                                               | ~(RT_BIT(IEMNATIVE_HST_SIMD_REG_COUNT) - 1U)
# endif
                                               ;
    pReNative->Core.bmHstSimdRegsWithGstShadow   = 0;
    pReNative->Core.bmGstSimdRegShadows          = 0;
    pReNative->Core.bmGstSimdRegShadowDirtyLo128 = 0;
    pReNative->Core.bmGstSimdRegShadowDirtyHi128 = 0;

    /* Full host register reinit: */
    for (unsigned i = 0; i < RT_ELEMENTS(pReNative->Core.aHstSimdRegs); i++)
    {
        pReNative->Core.aHstSimdRegs[i].fGstRegShadows = 0;
        pReNative->Core.aHstSimdRegs[i].enmWhat        = kIemNativeWhat_Invalid;
        pReNative->Core.aHstSimdRegs[i].idxVar         = UINT8_MAX;
        pReNative->Core.aHstSimdRegs[i].enmLoaded      = kIemNativeGstSimdRegLdStSz_Invalid;
    }

    fRegs = IEMNATIVE_SIMD_REG_FIXED_MASK;
    for (uint32_t idxReg = ASMBitFirstSetU32(fRegs) - 1; fRegs != 0; idxReg = ASMBitFirstSetU32(fRegs) - 1)
    {
        fRegs &= ~RT_BIT_32(idxReg);
        pReNative->Core.aHstSimdRegs[idxReg].enmWhat = kIemNativeWhat_FixedReserved;
    }

# ifdef IEMNATIVE_SIMD_REG_FIXED_TMP0
    pReNative->Core.aHstSimdRegs[IEMNATIVE_SIMD_REG_FIXED_TMP0].enmWhat = kIemNativeWhat_FixedTmp;
# endif

#endif

    return pReNative;
}


/**
 * Used when done emitting the per-chunk code and for iemNativeInit bailout.
 */
static void iemNativeTerm(PIEMRECOMPILERSTATE pReNative)
{
    RTMemFree(pReNative->pInstrBuf);
    RTMemFree(pReNative->paLabels);
    RTMemFree(pReNative->paFixups);
#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
    RTMemFree(pReNative->paTbExitFixups);
#endif
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    RTMemFree(pReNative->pDbgInfo);
#endif
    RTMemFree(pReNative);
}

/**
 * Allocates and initializes the native recompiler state.
 *
 * This is called the first time an EMT wants to recompile something.
 *
 * @returns Pointer to the new recompiler state.
 * @param   pVCpu   The cross context virtual CPU structure of the calling
 *                  thread.
 * @param   pTb     The TB that's about to be recompiled.  When this is NULL,
 *                  the recompiler state is for emitting the common per-chunk
 *                  code from iemNativeRecompileAttachExecMemChunkCtx.
 * @thread  EMT(pVCpu)
 */
static PIEMRECOMPILERSTATE iemNativeInit(PVMCPUCC pVCpu, PCIEMTB pTb)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PIEMRECOMPILERSTATE pReNative = (PIEMRECOMPILERSTATE)RTMemAllocZ(sizeof(*pReNative));
    AssertReturn(pReNative, NULL);

    /*
     * Try to allocate all the buffers and stuff we need.
     */
    uint32_t const cFactor = pTb ? 1 : 32 /* per-chunk stuff doesn't really need anything but the code buffer */;
    pReNative->pInstrBuf = (PIEMNATIVEINSTR)RTMemAllocZ(_64K);
    pReNative->paLabels  = (PIEMNATIVELABEL)RTMemAllocZ(sizeof(IEMNATIVELABEL) * _8K / cFactor);
    pReNative->paFixups  = (PIEMNATIVEFIXUP)RTMemAllocZ(sizeof(IEMNATIVEFIXUP) * _16K / cFactor);
#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
    pReNative->paTbExitFixups = (PIEMNATIVEEXITFIXUP)RTMemAllocZ(sizeof(IEMNATIVEEXITFIXUP) * _8K / cFactor);
#endif
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    pReNative->pDbgInfo  = (PIEMTBDBG)RTMemAllocZ(RT_UOFFSETOF_DYN(IEMTBDBG, aEntries[_16K / cFactor]));
#endif
    if (RT_LIKELY(   pReNative->pInstrBuf
                  && pReNative->paLabels
                  && pReNative->paFixups)
#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
        && pReNative->paTbExitFixups
#endif
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
        && pReNative->pDbgInfo
#endif
       )
    {
        /*
         * Set the buffer & array sizes on success.
         */
        pReNative->cInstrBufAlloc     = _64K / sizeof(IEMNATIVEINSTR);
        pReNative->cLabelsAlloc       = _8K  / cFactor;
        pReNative->cFixupsAlloc       = _16K / cFactor;
#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
        pReNative->cTbExitFixupsAlloc = _8K  / cFactor;
#endif
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
        pReNative->cDbgInfoAlloc      = _16K / cFactor;
#endif

        /* Other constant stuff: */
        pReNative->pVCpu              = pVCpu;

        /*
         * Done, just reinit it.
         */
        return iemNativeReInit(pReNative, pTb);
    }

    /*
     * Failed. Cleanup and return.
     */
    AssertFailed();
    iemNativeTerm(pReNative);
    return NULL;
}


/**
 * Creates a label.
 *
 * If the label does not yet have a defined position,
 * call iemNativeLabelDefine() later to set it.
 *
 * @returns Label ID. Throws VBox status code on failure, so no need to check
 *          the return value.
 * @param   pReNative   The native recompile state.
 * @param   enmType     The label type.
 * @param   offWhere    The instruction offset of the label.  UINT32_MAX if the
 *                      label is not yet defined (default).
 * @param   uData       Data associated with the label.  Only applicable to
 *                      certain types of labels.  Default is zero.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
                     uint32_t offWhere /*= UINT32_MAX*/, uint16_t uData /*= 0*/)
{
    Assert(uData == 0 || enmType >= kIemNativeLabelType_FirstWithMultipleInstances);
#if defined(IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE) && defined(RT_ARCH_AMD64)
    Assert(enmType >= kIemNativeLabelType_LoopJumpTarget);
#endif

    /*
     * Locate existing label definition.
     *
     * This is only allowed for forward declarations where offWhere=UINT32_MAX
     * and uData is zero.
     */
    PIEMNATIVELABEL paLabels = pReNative->paLabels;
    uint32_t const  cLabels  = pReNative->cLabels;
    if (   pReNative->bmLabelTypes & RT_BIT_64(enmType)
#ifndef VBOX_STRICT
        && enmType  <  kIemNativeLabelType_FirstWithMultipleInstances
        && offWhere == UINT32_MAX
        && uData    == 0
#endif
       )
    {
#ifndef VBOX_STRICT
        AssertStmt(enmType > kIemNativeLabelType_Invalid && enmType < kIemNativeLabelType_FirstWithMultipleInstances,
                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
        uint32_t const idxLabel = pReNative->aidxUniqueLabels[enmType];
        if (idxLabel < pReNative->cLabels)
            return idxLabel;
#else
        for (uint32_t i = 0; i < cLabels; i++)
            if (   paLabels[i].enmType == enmType
                && paLabels[i].uData   == uData)
            {
                AssertStmt(uData == 0, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
                AssertStmt(offWhere == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
                AssertStmt(paLabels[i].off == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_2));
                AssertStmt(enmType < kIemNativeLabelType_FirstWithMultipleInstances && pReNative->aidxUniqueLabels[enmType] == i,
                           IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
                return i;
            }
        AssertStmt(   enmType >= kIemNativeLabelType_FirstWithMultipleInstances
                   || pReNative->aidxUniqueLabels[enmType] == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
#endif
    }

    /*
     * Make sure we've got room for another label.
     */
    if (RT_LIKELY(cLabels < pReNative->cLabelsAlloc))
    { /* likely */ }
    else
    {
        uint32_t cNew = pReNative->cLabelsAlloc;
        AssertStmt(cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_3));
        AssertStmt(cLabels == cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_3));
        cNew *= 2;
        AssertStmt(cNew <= _64K, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_TOO_MANY)); /* The IEMNATIVEFIXUP::idxLabel type restricts this. */
        paLabels = (PIEMNATIVELABEL)RTMemRealloc(paLabels, cNew * sizeof(paLabels[0]));
        AssertStmt(paLabels, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_OUT_OF_MEMORY));
        pReNative->paLabels     = paLabels;
        pReNative->cLabelsAlloc = cNew;
    }

    /*
     * Define a new label.
     */
    paLabels[cLabels].off     = offWhere;
    paLabels[cLabels].enmType = enmType;
    paLabels[cLabels].uData   = uData;
    pReNative->cLabels        = cLabels + 1;

    Assert((unsigned)enmType < 64);
    pReNative->bmLabelTypes |= RT_BIT_64(enmType);

    if (enmType < kIemNativeLabelType_FirstWithMultipleInstances)
    {
        Assert(uData == 0);
        pReNative->aidxUniqueLabels[enmType] = cLabels;
    }

    if (offWhere != UINT32_MAX)
    {
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
        iemNativeDbgInfoAddNativeOffset(pReNative, offWhere);
        iemNativeDbgInfoAddLabel(pReNative, enmType, uData);
#endif
    }
    return cLabels;
}
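
/*
 * Typical usage sketch (illustrative, not taken from any particular caller):
 * create a forward label, emit code that references it via a fixup, then pin
 * it down once the target offset is known:
 *
 *      uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmType);
 *      ... emit code, recording jumps to idxLabel with iemNativeAddFixup ...
 *      iemNativeLabelDefine(pReNative, idxLabel, off);
 */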


/**
 * Defines the location of an existing label.
 *
 * @param   pReNative   The native recompile state.
 * @param   idxLabel    The label to define.
 * @param   offWhere    The position.
 */
DECL_HIDDEN_THROW(void) iemNativeLabelDefine(PIEMRECOMPILERSTATE pReNative, uint32_t idxLabel, uint32_t offWhere)
{
    AssertStmt(idxLabel < pReNative->cLabels, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_4));
    PIEMNATIVELABEL const pLabel = &pReNative->paLabels[idxLabel];
    AssertStmt(pLabel->off == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_5));
    pLabel->off = offWhere;
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    iemNativeDbgInfoAddNativeOffset(pReNative, offWhere);
    iemNativeDbgInfoAddLabel(pReNative, (IEMNATIVELABELTYPE)pLabel->enmType, pLabel->uData);
#endif
}


/**
 * Looks up a label.
 *
 * @returns Label ID if found, UINT32_MAX if not.
 */
DECLHIDDEN(uint32_t) iemNativeLabelFind(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
                                        uint32_t offWhere /*= UINT32_MAX*/, uint16_t uData /*= 0*/) RT_NOEXCEPT
{
    Assert((unsigned)enmType < 64);
    if (RT_BIT_64(enmType) & pReNative->bmLabelTypes)
    {
        if (enmType < kIemNativeLabelType_FirstWithMultipleInstances)
            return pReNative->aidxUniqueLabels[enmType];

        PIEMNATIVELABEL paLabels = pReNative->paLabels;
        uint32_t const  cLabels  = pReNative->cLabels;
        for (uint32_t i = 0; i < cLabels; i++)
            if (   paLabels[i].enmType == enmType
                && paLabels[i].uData   == uData
                && (   paLabels[i].off == offWhere
                    || offWhere        == UINT32_MAX
                    || paLabels[i].off == UINT32_MAX))
                return i;
    }
    return UINT32_MAX;
}


/**
 * Adds a fixup.
 *
 * @throws  VBox status code (int) on failure.
 * @param   pReNative   The native recompile state.
 * @param   offWhere    The instruction offset of the fixup location.
 * @param   idxLabel    The target label ID for the fixup.
 * @param   enmType     The fixup type.
 * @param   offAddend   Fixup addend if applicable to the type.  Default is 0.
 */
DECL_HIDDEN_THROW(void)
iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel,
                  IEMNATIVEFIXUPTYPE enmType, int8_t offAddend /*= 0*/)
{
    Assert(idxLabel <= UINT16_MAX);
    Assert((unsigned)enmType <= UINT8_MAX);
#ifdef RT_ARCH_ARM64
    AssertStmt(   enmType != kIemNativeFixupType_RelImm14At5
               || pReNative->paLabels[idxLabel].enmType >= kIemNativeLabelType_LastWholeTbBranch,
               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_SHORT_JMP_TO_TAIL_LABEL));
#endif

    /*
     * Make sure we've got room.
     */
    PIEMNATIVEFIXUP paFixups = pReNative->paFixups;
    uint32_t const  cFixups  = pReNative->cFixups;
    if (RT_LIKELY(cFixups < pReNative->cFixupsAlloc))
    { /* likely */ }
    else
    {
        uint32_t cNew = pReNative->cFixupsAlloc;
        AssertStmt(cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_IPE_1));
        AssertStmt(cFixups == cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_IPE_1));
        cNew *= 2;
        AssertStmt(cNew <= _128K, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_TOO_MANY));
        paFixups = (PIEMNATIVEFIXUP)RTMemRealloc(paFixups, cNew * sizeof(paFixups[0]));
        AssertStmt(paFixups, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_OUT_OF_MEMORY));
        pReNative->paFixups     = paFixups;
        pReNative->cFixupsAlloc = cNew;
    }

    /*
     * Add the fixup.
     */
    paFixups[cFixups].off       = offWhere;
    paFixups[cFixups].idxLabel  = (uint16_t)idxLabel;
    paFixups[cFixups].enmType   = enmType;
    paFixups[cFixups].offAddend = offAddend;
    pReNative->cFixups          = cFixups + 1;
}
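
/*
 * The recorded fixups are not applied here; they are presumably resolved in
 * a later pass, once every label has a defined offset, by patching the
 * instruction at paFixups[i].off according to the fixup type and addend.
 */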


#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
/**
 * Adds a fixup to the per chunk tail code.
 *
 * @throws  VBox status code (int) on failure.
 * @param   pReNative       The native recompile state.
 * @param   offWhere        The instruction offset of the fixup location.
 * @param   enmExitReason   The exit reason to jump to.
 */
DECL_HIDDEN_THROW(void)
iemNativeAddTbExitFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, IEMNATIVELABELTYPE enmExitReason)
{
    Assert(IEMNATIVELABELTYPE_IS_EXIT_REASON(enmExitReason));

    /*
     * Make sure we've got room.
     */
    PIEMNATIVEEXITFIXUP paTbExitFixups = pReNative->paTbExitFixups;
    uint32_t const      cTbExitFixups  = pReNative->cTbExitFixups;
    if (RT_LIKELY(cTbExitFixups < pReNative->cTbExitFixupsAlloc))
    { /* likely */ }
    else
    {
        uint32_t cNew = pReNative->cTbExitFixupsAlloc;
        AssertStmt(cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_IPE_1));
        AssertStmt(cTbExitFixups == cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_IPE_1));
        cNew *= 2;
        AssertStmt(cNew <= _128K, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_TOO_MANY));
        paTbExitFixups = (PIEMNATIVEEXITFIXUP)RTMemRealloc(paTbExitFixups, cNew * sizeof(paTbExitFixups[0]));
        AssertStmt(paTbExitFixups, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_OUT_OF_MEMORY));
        pReNative->paTbExitFixups     = paTbExitFixups;
        pReNative->cTbExitFixupsAlloc = cNew;
    }

    /*
     * Add the fixup.
     */
    paTbExitFixups[cTbExitFixups].off           = offWhere;
    paTbExitFixups[cTbExitFixups].enmExitReason = enmExitReason;
    pReNative->cTbExitFixups = cTbExitFixups + 1;
}
#endif


/**
 * Slow code path for iemNativeInstrBufEnsure.
 */
DECL_HIDDEN_THROW(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq)
{
    /* Double the buffer size till we meet the request. */
    uint32_t cNew = pReNative->cInstrBufAlloc;
    AssertStmt(cNew > 0, IEMNATIVE_DO_LONGJMP(pReNative, VERR_INTERNAL_ERROR_5)); /* impossible */
    do
        cNew *= 2;
    while (cNew < off + cInstrReq);

    uint32_t const cbNew = cNew * sizeof(IEMNATIVEINSTR);
#ifdef RT_ARCH_ARM64
    uint32_t const cbMaxInstrBuf = _1M; /* Limited by the branch instruction range (18+2 bits). */
#else
    uint32_t const cbMaxInstrBuf = _2M;
#endif
    AssertStmt(cbNew <= cbMaxInstrBuf, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_INSTR_BUF_TOO_LARGE));

    void *pvNew = RTMemRealloc(pReNative->pInstrBuf, cbNew);
    AssertStmt(pvNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_INSTR_BUF_OUT_OF_MEMORY));

#ifdef VBOX_STRICT
    pReNative->offInstrBufChecked = off + cInstrReq;
#endif
    pReNative->cInstrBufAlloc = cNew;
    return pReNative->pInstrBuf = (PIEMNATIVEINSTR)pvNew;
}
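
/*
 * Illustrative only (not part of the original source): callers are expected
 * to go through the inline iemNativeInstrBufEnsure() wrapper, which falls
 * back to this slow path only when the requested number of instruction units
 * no longer fits the current allocation.  A sketch of the usual
 * ensure-then-write emit pattern:
 *
 * @code
 *      PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
 *      pCodeBuf[off++] = ...;   // write up to the number of units requested above
 * @endcode
 *
 * The wrapper name and the single-unit request are assumptions for the sake
 * of the example; the point is that emitters size the request up front and
 * then write directly into the returned buffer.
 */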

#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO

/**
 * Grows the static debug info array used during recompilation.
 *
 * @returns Pointer to the new debug info block; throws VBox status code on
 *          failure, so no need to check the return value.
 */
DECL_NO_INLINE(static, PIEMTBDBG) iemNativeDbgInfoGrow(PIEMRECOMPILERSTATE pReNative, PIEMTBDBG pDbgInfo)
{
    uint32_t cNew = pReNative->cDbgInfoAlloc * 2;
    AssertStmt(cNew < _1M && cNew != 0, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_DBGINFO_IPE_1));
    pDbgInfo = (PIEMTBDBG)RTMemRealloc(pDbgInfo, RT_UOFFSETOF_DYN(IEMTBDBG, aEntries[cNew]));
    AssertStmt(pDbgInfo, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_DBGINFO_OUT_OF_MEMORY));
    pReNative->pDbgInfo      = pDbgInfo;
    pReNative->cDbgInfoAlloc = cNew;
    return pDbgInfo;
}


/**
 * Adds a new, uninitialized debug info entry, returning the pointer to it.
 */
DECL_INLINE_THROW(PIEMTBDBGENTRY) iemNativeDbgInfoAddNewEntry(PIEMRECOMPILERSTATE pReNative, PIEMTBDBG pDbgInfo)
{
    if (RT_LIKELY(pDbgInfo->cEntries < pReNative->cDbgInfoAlloc))
    { /* likely */ }
    else
        pDbgInfo = iemNativeDbgInfoGrow(pReNative, pDbgInfo);
    return &pDbgInfo->aEntries[pDbgInfo->cEntries++];
}


/**
 * Debug Info: Adds a native offset record, if necessary.
 */
DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddNativeOffset(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    PIEMTBDBG pDbgInfo = pReNative->pDbgInfo;

    /*
     * Do we need this one?
     */
    uint32_t const offPrev = pDbgInfo->offNativeLast;
    if (offPrev == off)
        return;
    AssertStmt(offPrev < off || offPrev == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_DBGINFO_IPE_2));

    /*
     * Add it.
     */
    PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pDbgInfo);
    pEntry->NativeOffset.uType     = kIemTbDbgEntryType_NativeOffset;
    pEntry->NativeOffset.offNative = off;
    pDbgInfo->offNativeLast = off;
}


/**
 * Debug Info: Record info about a label.
 */
static void iemNativeDbgInfoAddLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, uint16_t uData)
{
    PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
    pEntry->Label.uType    = kIemTbDbgEntryType_Label;
    pEntry->Label.uUnused  = 0;
    pEntry->Label.enmLabel = (uint8_t)enmType;
    pEntry->Label.uData    = uData;
}


/**
 * Debug Info: Record info about a threaded call.
 */
static void iemNativeDbgInfoAddThreadedCall(PIEMRECOMPILERSTATE pReNative, IEMTHREADEDFUNCS enmCall, bool fRecompiled)
{
    PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
    pEntry->ThreadedCall.uType       = kIemTbDbgEntryType_ThreadedCall;
    pEntry->ThreadedCall.fRecompiled = fRecompiled;
    pEntry->ThreadedCall.uUnused     = 0;
    pEntry->ThreadedCall.enmCall     = (uint16_t)enmCall;
}


/**
 * Debug Info: Record info about a new guest instruction.
 */
static void iemNativeDbgInfoAddGuestInstruction(PIEMRECOMPILERSTATE pReNative, uint32_t fExec)
{
    PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
    pEntry->GuestInstruction.uType   = kIemTbDbgEntryType_GuestInstruction;
    pEntry->GuestInstruction.uUnused = 0;
    pEntry->GuestInstruction.fExec   = fExec;
}


/**
 * Debug Info: Record info about guest register shadowing.
 */
DECL_HIDDEN_THROW(void)
iemNativeDbgInfoAddGuestRegShadowing(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg,
                                     uint8_t idxHstReg /*= UINT8_MAX*/, uint8_t idxHstRegPrev /*= UINT8_MAX*/)
{
    PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
    pEntry->GuestRegShadowing.uType         = kIemTbDbgEntryType_GuestRegShadowing;
    pEntry->GuestRegShadowing.uUnused       = 0;
    pEntry->GuestRegShadowing.idxGstReg     = enmGstReg;
    pEntry->GuestRegShadowing.idxHstReg     = idxHstReg;
    pEntry->GuestRegShadowing.idxHstRegPrev = idxHstRegPrev;
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    Assert(   idxHstReg != UINT8_MAX
           || !(pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(enmGstReg)));
#endif
}


# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/**
 * Debug Info: Record info about guest SIMD register shadowing.
 */
DECL_HIDDEN_THROW(void)
iemNativeDbgInfoAddGuestSimdRegShadowing(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTSIMDREG enmGstSimdReg,
                                         uint8_t idxHstSimdReg /*= UINT8_MAX*/, uint8_t idxHstSimdRegPrev /*= UINT8_MAX*/)
{
    PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
    pEntry->GuestSimdRegShadowing.uType             = kIemTbDbgEntryType_GuestSimdRegShadowing;
    pEntry->GuestSimdRegShadowing.uUnused           = 0;
    pEntry->GuestSimdRegShadowing.idxGstSimdReg     = enmGstSimdReg;
    pEntry->GuestSimdRegShadowing.idxHstSimdReg     = idxHstSimdReg;
    pEntry->GuestSimdRegShadowing.idxHstSimdRegPrev = idxHstSimdRegPrev;
}
# endif


# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
/**
 * Debug Info: Record info about delayed RIP updates.
 */
DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddDelayedPcUpdate(PIEMRECOMPILERSTATE pReNative, uint64_t offPc, uint32_t cInstrSkipped)
{
    PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
    pEntry->DelayedPcUpdate.uType         = kIemTbDbgEntryType_DelayedPcUpdate;
    pEntry->DelayedPcUpdate.cInstrSkipped = cInstrSkipped;
    pEntry->DelayedPcUpdate.offPc         = offPc; /** @todo support larger values */
}
# endif

# if defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK) || defined(IEMNATIVE_WITH_SIMD_REG_ALLOCATOR)

/**
 * Debug Info: Record info about a dirty guest register.
 */
DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddGuestRegDirty(PIEMRECOMPILERSTATE pReNative, bool fSimdReg,
                                                         uint8_t idxGstReg, uint8_t idxHstReg)
{
    PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
    pEntry->GuestRegDirty.uType     = kIemTbDbgEntryType_GuestRegDirty;
    pEntry->GuestRegDirty.fSimdReg  = fSimdReg ? 1 : 0;
    pEntry->GuestRegDirty.idxGstReg = idxGstReg;
    pEntry->GuestRegDirty.idxHstReg = idxHstReg;
}


/**
 * Debug Info: Record info about a dirty guest register writeback operation.
 */
DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddGuestRegWriteback(PIEMRECOMPILERSTATE pReNative, bool fSimdReg, uint64_t fGstReg)
{
    unsigned const cBitsGstRegMask = 25;
    uint32_t const fGstRegMask     = RT_BIT_32(cBitsGstRegMask) - 1U;

    /* The first block of 25 bits: */
    if (fGstReg & fGstRegMask)
    {
        PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
        pEntry->GuestRegWriteback.uType    = kIemTbDbgEntryType_GuestRegWriteback;
        pEntry->GuestRegWriteback.fSimdReg = fSimdReg ? 1 : 0;
        pEntry->GuestRegWriteback.cShift   = 0;
        pEntry->GuestRegWriteback.fGstReg  = (uint32_t)(fGstReg & fGstRegMask);
        fGstReg &= ~(uint64_t)fGstRegMask;
        if (!fGstReg)
            return;
    }

    /* The second block of 25 bits: */
    fGstReg >>= cBitsGstRegMask;
    if (fGstReg & fGstRegMask)
    {
        PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
        pEntry->GuestRegWriteback.uType    = kIemTbDbgEntryType_GuestRegWriteback;
        pEntry->GuestRegWriteback.fSimdReg = fSimdReg ? 1 : 0;
        pEntry->GuestRegWriteback.cShift   = 1;
        pEntry->GuestRegWriteback.fGstReg  = (uint32_t)(fGstReg & fGstRegMask);
        fGstReg &= ~(uint64_t)fGstRegMask;
        if (!fGstReg)
            return;
    }

    /* The last block with 14 bits: */
    fGstReg >>= cBitsGstRegMask;
    Assert(fGstReg & fGstRegMask);
    Assert((fGstReg & ~(uint64_t)fGstRegMask) == 0);
    PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
    pEntry->GuestRegWriteback.uType    = kIemTbDbgEntryType_GuestRegWriteback;
    pEntry->GuestRegWriteback.fSimdReg = fSimdReg ? 1 : 0;
    pEntry->GuestRegWriteback.cShift   = 2;
    pEntry->GuestRegWriteback.fGstReg  = (uint32_t)(fGstReg & fGstRegMask);
}
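
/*
 * Illustrative only (not part of the original source): assuming the encoding
 * above, a consumer of the debug info would reassemble the 64-bit register
 * mask from the up to three 25-bit blocks like this, OR'ing together the
 * entries belonging to the same writeback:
 *
 * @code
 *      uint64_t const fGstRegPart = (uint64_t)pEntry->GuestRegWriteback.fGstReg
 *                                << (pEntry->GuestRegWriteback.cShift * 25);
 * @endcode
 *
 * I.e. cShift (0, 1 or 2) selects which 25-bit block of the mask the entry
 * carries; the cShift value in the second block above was fixed to 1 so the
 * three blocks decode to distinct parts of the mask.
 */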

# endif /* defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK) || defined(IEMNATIVE_WITH_SIMD_REG_ALLOCATOR) */

#endif /* IEMNATIVE_WITH_TB_DEBUG_INFO */


/*********************************************************************************************************************************
*   Register Allocator                                                                                                           *
*********************************************************************************************************************************/

/**
 * Register parameter indexes (indexed by argument number).
 */
DECL_HIDDEN_CONST(uint8_t) const g_aidxIemNativeCallRegs[] =
{
    IEMNATIVE_CALL_ARG0_GREG,
    IEMNATIVE_CALL_ARG1_GREG,
    IEMNATIVE_CALL_ARG2_GREG,
    IEMNATIVE_CALL_ARG3_GREG,
#if defined(IEMNATIVE_CALL_ARG4_GREG)
    IEMNATIVE_CALL_ARG4_GREG,
# if defined(IEMNATIVE_CALL_ARG5_GREG)
    IEMNATIVE_CALL_ARG5_GREG,
#  if defined(IEMNATIVE_CALL_ARG6_GREG)
    IEMNATIVE_CALL_ARG6_GREG,
#   if defined(IEMNATIVE_CALL_ARG7_GREG)
    IEMNATIVE_CALL_ARG7_GREG,
#   endif
#  endif
# endif
#endif
};
AssertCompile(RT_ELEMENTS(g_aidxIemNativeCallRegs) == IEMNATIVE_CALL_ARG_GREG_COUNT);

/**
 * Call register masks indexed by argument count.
 */
DECL_HIDDEN_CONST(uint32_t) const g_afIemNativeCallRegs[] =
{
    0,
    RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG),
    RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG),
    RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG),
    RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG)
    | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG),
#if defined(IEMNATIVE_CALL_ARG4_GREG)
    RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG)
    | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG),
# if defined(IEMNATIVE_CALL_ARG5_GREG)
    RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG)
    | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG5_GREG),
#  if defined(IEMNATIVE_CALL_ARG6_GREG)
    RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG)
    | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG5_GREG)
    | RT_BIT_32(IEMNATIVE_CALL_ARG6_GREG),
#   if defined(IEMNATIVE_CALL_ARG7_GREG)
    RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG)
    | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG5_GREG)
    | RT_BIT_32(IEMNATIVE_CALL_ARG6_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG7_GREG),
#   endif
#  endif
# endif
#endif
};
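
/*
 * Illustrative only (not part of the original source): together these tables
 * let call-emitting code map an argument number to the host register that
 * carries it, and an argument count to the set of argument registers such a
 * call uses, e.g.:
 *
 * @code
 *      uint8_t const  idxRegArg1 = g_aidxIemNativeCallRegs[1];  // reg carrying the 2nd argument
 *      uint32_t const fArgRegs   = g_afIemNativeCallRegs[3];    // regs used by a 3 argument call
 * @endcode
 */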

#ifdef IEMNATIVE_FP_OFF_STACK_ARG0
/**
 * BP offset of the stack argument slots.
 *
 * This array is indexed by \#argument - IEMNATIVE_CALL_ARG_GREG_COUNT and has
 * IEMNATIVE_FRAME_STACK_ARG_COUNT entries.
 */
DECL_HIDDEN_CONST(int32_t) const g_aoffIemNativeCallStackArgBpDisp[] =
{
    IEMNATIVE_FP_OFF_STACK_ARG0,
# ifdef IEMNATIVE_FP_OFF_STACK_ARG1
    IEMNATIVE_FP_OFF_STACK_ARG1,
# endif
# ifdef IEMNATIVE_FP_OFF_STACK_ARG2
    IEMNATIVE_FP_OFF_STACK_ARG2,
# endif
# ifdef IEMNATIVE_FP_OFF_STACK_ARG3
    IEMNATIVE_FP_OFF_STACK_ARG3,
# endif
};
AssertCompile(RT_ELEMENTS(g_aoffIemNativeCallStackArgBpDisp) == IEMNATIVE_FRAME_STACK_ARG_COUNT);
#endif /* IEMNATIVE_FP_OFF_STACK_ARG0 */

/**
 * Info about shadowed guest register values.
 * @see IEMNATIVEGSTREG
 */
DECL_HIDDEN_CONST(IEMANTIVEGSTREGINFO const) g_aGstShadowInfo[] =
{
#define CPUMCTX_OFF_AND_SIZE(a_Reg) (uint32_t)RT_UOFFSETOF(VMCPU, cpum.GstCtx. a_Reg), RT_SIZEOFMEMB(VMCPU, cpum.GstCtx. a_Reg)
    /* [kIemNativeGstReg_GprFirst + X86_GREG_xAX] = */  { CPUMCTX_OFF_AND_SIZE(rax),                "rax", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_xCX] = */  { CPUMCTX_OFF_AND_SIZE(rcx),                "rcx", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_xDX] = */  { CPUMCTX_OFF_AND_SIZE(rdx),                "rdx", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_xBX] = */  { CPUMCTX_OFF_AND_SIZE(rbx),                "rbx", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_xSP] = */  { CPUMCTX_OFF_AND_SIZE(rsp),                "rsp", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_xBP] = */  { CPUMCTX_OFF_AND_SIZE(rbp),                "rbp", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_xSI] = */  { CPUMCTX_OFF_AND_SIZE(rsi),                "rsi", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_xDI] = */  { CPUMCTX_OFF_AND_SIZE(rdi),                "rdi", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_x8 ] = */  { CPUMCTX_OFF_AND_SIZE(r8),                 "r8", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_x9 ] = */  { CPUMCTX_OFF_AND_SIZE(r9),                 "r9", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_x10] = */  { CPUMCTX_OFF_AND_SIZE(r10),                "r10", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_x11] = */  { CPUMCTX_OFF_AND_SIZE(r11),                "r11", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_x12] = */  { CPUMCTX_OFF_AND_SIZE(r12),                "r12", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_x13] = */  { CPUMCTX_OFF_AND_SIZE(r13),                "r13", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_x14] = */  { CPUMCTX_OFF_AND_SIZE(r14),                "r14", },
    /* [kIemNativeGstReg_GprFirst + X86_GREG_x15] = */  { CPUMCTX_OFF_AND_SIZE(r15),                "r15", },
    /* [kIemNativeGstReg_Pc] = */                       { CPUMCTX_OFF_AND_SIZE(rip),                "rip", },
    /* [kIemNativeGstReg_Cr0] = */                      { CPUMCTX_OFF_AND_SIZE(cr0),                "cr0", },
    /* [kIemNativeGstReg_FpuFcw] = */                   { CPUMCTX_OFF_AND_SIZE(XState.x87.FCW),     "fcw", },
    /* [kIemNativeGstReg_FpuFsw] = */                   { CPUMCTX_OFF_AND_SIZE(XState.x87.FSW),     "fsw", },
    /* [kIemNativeGstReg_SegBaseFirst + 0] = */         { CPUMCTX_OFF_AND_SIZE(aSRegs[0].u64Base),  "es_base", },
    /* [kIemNativeGstReg_SegBaseFirst + 1] = */         { CPUMCTX_OFF_AND_SIZE(aSRegs[1].u64Base),  "cs_base", },
    /* [kIemNativeGstReg_SegBaseFirst + 2] = */         { CPUMCTX_OFF_AND_SIZE(aSRegs[2].u64Base),  "ss_base", },
    /* [kIemNativeGstReg_SegBaseFirst + 3] = */         { CPUMCTX_OFF_AND_SIZE(aSRegs[3].u64Base),  "ds_base", },
    /* [kIemNativeGstReg_SegBaseFirst + 4] = */         { CPUMCTX_OFF_AND_SIZE(aSRegs[4].u64Base),  "fs_base", },
    /* [kIemNativeGstReg_SegBaseFirst + 5] = */         { CPUMCTX_OFF_AND_SIZE(aSRegs[5].u64Base),  "gs_base", },
    /* [kIemNativeGstReg_SegAttribFirst + 0] = */       { CPUMCTX_OFF_AND_SIZE(aSRegs[0].Attr.u),   "es_attrib", },
    /* [kIemNativeGstReg_SegAttribFirst + 1] = */       { CPUMCTX_OFF_AND_SIZE(aSRegs[1].Attr.u),   "cs_attrib", },
    /* [kIemNativeGstReg_SegAttribFirst + 2] = */       { CPUMCTX_OFF_AND_SIZE(aSRegs[2].Attr.u),   "ss_attrib", },
    /* [kIemNativeGstReg_SegAttribFirst + 3] = */       { CPUMCTX_OFF_AND_SIZE(aSRegs[3].Attr.u),   "ds_attrib", },
    /* [kIemNativeGstReg_SegAttribFirst + 4] = */       { CPUMCTX_OFF_AND_SIZE(aSRegs[4].Attr.u),   "fs_attrib", },
    /* [kIemNativeGstReg_SegAttribFirst + 5] = */       { CPUMCTX_OFF_AND_SIZE(aSRegs[5].Attr.u),   "gs_attrib", },
    /* [kIemNativeGstReg_SegLimitFirst + 0] = */        { CPUMCTX_OFF_AND_SIZE(aSRegs[0].u32Limit), "es_limit", },
    /* [kIemNativeGstReg_SegLimitFirst + 1] = */        { CPUMCTX_OFF_AND_SIZE(aSRegs[1].u32Limit), "cs_limit", },
    /* [kIemNativeGstReg_SegLimitFirst + 2] = */        { CPUMCTX_OFF_AND_SIZE(aSRegs[2].u32Limit), "ss_limit", },
    /* [kIemNativeGstReg_SegLimitFirst + 3] = */        { CPUMCTX_OFF_AND_SIZE(aSRegs[3].u32Limit), "ds_limit", },
    /* [kIemNativeGstReg_SegLimitFirst + 4] = */        { CPUMCTX_OFF_AND_SIZE(aSRegs[4].u32Limit), "fs_limit", },
    /* [kIemNativeGstReg_SegLimitFirst + 5] = */        { CPUMCTX_OFF_AND_SIZE(aSRegs[5].u32Limit), "gs_limit", },
    /* [kIemNativeGstReg_SegSelFirst + 0] = */          { CPUMCTX_OFF_AND_SIZE(aSRegs[0].Sel),      "es", },
    /* [kIemNativeGstReg_SegSelFirst + 1] = */          { CPUMCTX_OFF_AND_SIZE(aSRegs[1].Sel),      "cs", },
    /* [kIemNativeGstReg_SegSelFirst + 2] = */          { CPUMCTX_OFF_AND_SIZE(aSRegs[2].Sel),      "ss", },
    /* [kIemNativeGstReg_SegSelFirst + 3] = */          { CPUMCTX_OFF_AND_SIZE(aSRegs[3].Sel),      "ds", },
    /* [kIemNativeGstReg_SegSelFirst + 4] = */          { CPUMCTX_OFF_AND_SIZE(aSRegs[4].Sel),      "fs", },
    /* [kIemNativeGstReg_SegSelFirst + 5] = */          { CPUMCTX_OFF_AND_SIZE(aSRegs[5].Sel),      "gs", },
    /* [kIemNativeGstReg_Cr4] = */                      { CPUMCTX_OFF_AND_SIZE(cr4),                "cr4", },
    /* [kIemNativeGstReg_Xcr0] = */                     { CPUMCTX_OFF_AND_SIZE(aXcr[0]),            "xcr0", },
    /* [kIemNativeGstReg_MxCsr] = */                    { CPUMCTX_OFF_AND_SIZE(XState.x87.MXCSR),   "mxcsr", },
    /* [kIemNativeGstReg_EFlags] = */                   { CPUMCTX_OFF_AND_SIZE(eflags),             "eflags", },
#undef CPUMCTX_OFF_AND_SIZE
};
AssertCompile(RT_ELEMENTS(g_aGstShadowInfo) == kIemNativeGstReg_End);
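
/*
 * Illustrative only (not part of the original source): the table above is
 * indexed by IEMNATIVEGSTREG and supplies the VMCPU offset, the size and a
 * friendly name for each shadowable guest value, so generic load/store and
 * logging code can be table driven, e.g.:
 *
 * @code
 *      uint32_t const offVCpu = g_aGstShadowInfo[enmGstReg].off;
 *      uint8_t const  cbValue = g_aGstShadowInfo[enmGstReg].cb;
 *      Log12(("%s lives at VMCPU offset %#x\n", g_aGstShadowInfo[enmGstReg].pszName, offVCpu));
 * @endcode
 */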


/** Host CPU general purpose register names. */
DECL_HIDDEN_CONST(const char * const) g_apszIemNativeHstRegNames[] =
{
#ifdef RT_ARCH_AMD64
    "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#elif defined(RT_ARCH_ARM64)
    "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",  "x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "bp",  "lr",  "sp/xzr",
#else
# error "port me"
#endif
};


#if 0 /* unused */
/**
 * Tries to locate a suitable register in the given register mask.
 *
 * This ASSUMES the caller has done the minimal/optimal allocation checks and
 * failed.
 *
 * @returns Host register number on success, returns UINT8_MAX on failure.
 */
static uint8_t iemNativeRegTryAllocFree(PIEMRECOMPILERSTATE pReNative, uint32_t fRegMask)
{
    Assert(!(fRegMask & ~IEMNATIVE_HST_GREG_MASK));
    uint32_t fRegs = ~pReNative->Core.bmHstRegs & fRegMask;
    if (fRegs)
    {
        /** @todo pick better here: */
        unsigned const idxReg = ASMBitFirstSetU32(fRegs) - 1;

        Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0);
        Assert(   (pReNative->Core.aHstRegs[idxReg].fGstRegShadows & pReNative->Core.bmGstRegShadows)
               == pReNative->Core.aHstRegs[idxReg].fGstRegShadows);
        Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg));

        pReNative->Core.bmGstRegShadows        &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
        pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
        pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
        return idxReg;
    }
    return UINT8_MAX;
}
#endif /* unused */
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK

/**
 * Stores the host reg @a idxHstReg into guest shadow register @a enmGstReg.
 *
 * @returns New code buffer offset on success, UINT32_MAX on failure.
 * @param   pReNative   The native recompile state.
 * @param   off         The current code buffer position.
 * @param   enmGstReg   The guest register to store to.
 * @param   idxHstReg   The host register to store from.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitStoreGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEGSTREG enmGstReg, uint8_t idxHstReg)
{
    Assert((unsigned)enmGstReg < (unsigned)kIemNativeGstReg_End);
    Assert(g_aGstShadowInfo[enmGstReg].cb != 0);

    switch (g_aGstShadowInfo[enmGstReg].cb)
    {
        case sizeof(uint64_t):
            return iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
        case sizeof(uint32_t):
            return iemNativeEmitStoreGprToVCpuU32(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
        case sizeof(uint16_t):
            return iemNativeEmitStoreGprToVCpuU16(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
# if 0 /* not present in the table. */
        case sizeof(uint8_t):
            return iemNativeEmitStoreGprToVCpuU8(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
# endif
        default:
            AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IPE_NOT_REACHED_DEFAULT_CASE));
    }
}


/**
 * Emits code to flush a pending write of the given guest register,
 * version with alternative core state.
 *
 * @returns New code buffer offset.
 * @param   pReNative   The native recompile state.
 * @param   off         Current code buffer position.
 * @param   pCore       Alternative core state.
 * @param   enmGstReg   The guest register to flush.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeRegFlushPendingWriteEx(PIEMRECOMPILERSTATE pReNative, uint32_t off, PIEMNATIVECORESTATE pCore, IEMNATIVEGSTREG enmGstReg)
{
    uint8_t const idxHstReg = pCore->aidxGstRegShadows[enmGstReg];

    Assert(   (   enmGstReg >= kIemNativeGstReg_GprFirst
               && enmGstReg <= kIemNativeGstReg_GprLast)
           || enmGstReg == kIemNativeGstReg_MxCsr);
    Assert(   idxHstReg != UINT8_MAX
           && pCore->bmGstRegShadowDirty & RT_BIT_64(enmGstReg));
    Log12(("iemNativeRegFlushPendingWriteEx: Clearing guest register %s shadowed by host %s (off=%#x)\n",
           g_aGstShadowInfo[enmGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg], off));

    off = iemNativeEmitStoreGprWithGstShadowReg(pReNative, off, enmGstReg, idxHstReg);

    pCore->bmGstRegShadowDirty &= ~RT_BIT_64(enmGstReg);
    return off;
}


/**
 * Emits code to flush a pending write of the given guest register.
 *
 * @returns New code buffer offset.
 * @param   pReNative   The native recompile state.
 * @param   off         Current code buffer position.
 * @param   enmGstReg   The guest register to flush.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeRegFlushPendingWrite(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEGSTREG enmGstReg)
{
    uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[enmGstReg];

    Assert(   (   enmGstReg >= kIemNativeGstReg_GprFirst
               && enmGstReg <= kIemNativeGstReg_GprLast)
           || enmGstReg == kIemNativeGstReg_MxCsr);
    Assert(   idxHstReg != UINT8_MAX
           && pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(enmGstReg));
    Log12(("iemNativeRegFlushPendingWrite: Clearing guest register %s shadowed by host %s (off=%#x)\n",
           g_aGstShadowInfo[enmGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg], off));

    off = iemNativeEmitStoreGprWithGstShadowReg(pReNative, off, enmGstReg, idxHstReg);

    pReNative->Core.bmGstRegShadowDirty &= ~RT_BIT_64(enmGstReg);
    return off;
}


/**
 * Flush the given set of guest registers if marked as dirty.
 *
 * @returns New code buffer offset.
 * @param   pReNative       The native recompile state.
 * @param   off             Current code buffer position.
 * @param   fFlushGstReg    The guest register set to flush (default is flush
 *                          everything).
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeRegFlushDirtyGuest(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t fFlushGstReg /*= UINT64_MAX*/)
{
    uint64_t bmGstRegShadowDirty = pReNative->Core.bmGstRegShadowDirty & fFlushGstReg;
    if (bmGstRegShadowDirty)
    {
# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
        iemNativeDbgInfoAddNativeOffset(pReNative, off);
        iemNativeDbgInfoAddGuestRegWriteback(pReNative, false /*fSimdReg*/, bmGstRegShadowDirty);
# endif
        do
        {
            unsigned const idxGstReg = ASMBitFirstSetU64(bmGstRegShadowDirty) - 1;
            bmGstRegShadowDirty &= ~RT_BIT_64(idxGstReg);
            off = iemNativeRegFlushPendingWrite(pReNative, off, (IEMNATIVEGSTREG)idxGstReg);
            Assert(!(pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(idxGstReg)));
        } while (bmGstRegShadowDirty);
    }

    return off;
}
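
/*
 * Illustrative only (not part of the original source): before emitting
 * anything that must observe an up-to-date guest context in memory (a helper
 * call, a TB exit, ...), a caller can flush all pending writes by relying on
 * the UINT64_MAX default for the register mask:
 *
 * @code
 *      off = iemNativeRegFlushDirtyGuest(pReNative, off);
 * @endcode
 */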


/**
 * Flush all shadowed guest registers marked as dirty for the given host register.
 *
 * @returns New code buffer offset.
 * @param   pReNative   The native recompile state.
 * @param   off         Current code buffer position.
 * @param   idxHstReg   The host register.
 *
 * @note    This doesn't do any unshadowing of guest registers from the host
 *          register.
 */
DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuestByHostRegShadow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstReg)
{
    /* We need to flush any pending guest register writes this host register shadows. */
    uint64_t fGstRegShadows = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
    if (pReNative->Core.bmGstRegShadowDirty & fGstRegShadows)
    {
# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
        iemNativeDbgInfoAddNativeOffset(pReNative, off);
        iemNativeDbgInfoAddGuestRegWriteback(pReNative, false /*fSimdReg*/, pReNative->Core.bmGstRegShadowDirty & fGstRegShadows);
# endif
        uint64_t bmGstRegShadowDirty = pReNative->Core.bmGstRegShadowDirty & fGstRegShadows;
        do
        {
            unsigned const idxGstReg = ASMBitFirstSetU64(bmGstRegShadowDirty) - 1;
            bmGstRegShadowDirty &= ~RT_BIT_64(idxGstReg);
            off = iemNativeRegFlushPendingWrite(pReNative, off, (IEMNATIVEGSTREG)idxGstReg);
            Assert(!(pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(idxGstReg)));
        } while (bmGstRegShadowDirty);
    }

    return off;
}

#endif /* IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK */


/**
 * Locate a register, possibly freeing one up.
 *
 * This ASSUMES the caller has done the minimal/optimal allocation checks and
 * failed.
 *
 * @returns Host register number on success.  Returns UINT8_MAX if no registers
 *          were found, the caller is supposed to deal with this and raise an
 *          allocation type specific status code (if desired).
 *
 * @throws  VBox status code if we run into trouble spilling a variable or
 *          recording debug info.  Does NOT throw anything if we're out of
 *          registers, though.
 */
static uint8_t iemNativeRegAllocFindFree(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile,
                                         uint32_t fRegMask = IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK)
{
    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFree);
    Assert(!(fRegMask & ~IEMNATIVE_HST_GREG_MASK));
    Assert(!(fRegMask & IEMNATIVE_REG_FIXED_MASK));

    /*
     * Try a free register that's shadowing a guest register.
     */
    uint32_t fRegs = ~pReNative->Core.bmHstRegs & fRegMask;
    if (fRegs)
    {
        STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeNoVar);

#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
        /*
         * When we have liveness information, we use it to kick out all shadowed
         * guest registers that will not be needed any more in this TB.  If we're
         * lucky, this may prevent us from ending up here again.
         *
         * Note! We must consider the previous entry here so we don't free
         *       anything that the current threaded function requires (current
         *       entry is produced by the next threaded function).
         */
        uint32_t const idxCurCall = pReNative->idxCurCall;
        if (idxCurCall > 0)
        {
            PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[idxCurCall - 1];

# ifndef IEMLIVENESS_EXTENDED_LAYOUT
            /* Construct a mask of the guest registers in the UNUSED and XCPT_OR_CALL state. */
            AssertCompile(IEMLIVENESS_STATE_UNUSED == 1 && IEMLIVENESS_STATE_XCPT_OR_CALL == 2);
            uint64_t fToFreeMask = pLivenessEntry->Bit0.bm64
                                 ^ pLivenessEntry->Bit1.bm64; /* mask of regs in either UNUSED or XCPT_OR_CALL state. */
# else
            /* Construct a mask of the registers not in the read or write state.
               Note! We could skip writes, if they aren't from us, as this is just
                     a hack to prevent trashing registers that have just been
                     written or will be written when we retire the current
                     instruction. */
            uint64_t fToFreeMask = ~pLivenessEntry->aBits[IEMLIVENESS_BIT_READ].bm64
                                 & ~pLivenessEntry->aBits[IEMLIVENESS_BIT_WRITE].bm64
                                 & IEMLIVENESSBIT_MASK;
# endif
            /* Merge EFLAGS. */
            uint64_t fTmp = fToFreeMask & (fToFreeMask >> 3);   /* AF2,PF2,CF2,Other2 = AF,PF,CF,Other & OF,SF,ZF,AF */
            fTmp &= fTmp >> 2;                                  /*     CF3,Other3 = AF2,PF2 & CF2,Other2  */
            fTmp &= fTmp >> 1;                                  /*         Other4 = CF3 & Other3 */
            fToFreeMask &= RT_BIT_64(kIemNativeGstReg_EFlags) - 1;
            fToFreeMask |= fTmp & RT_BIT_64(kIemNativeGstReg_EFlags);

            /* If it matches any shadowed registers. */
            if (pReNative->Core.bmGstRegShadows & fToFreeMask)
            {
# ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
                /* Writeback any dirty shadow registers we are about to unshadow. */
                *poff = iemNativeRegFlushDirtyGuest(pReNative, *poff, fToFreeMask);
# endif

                STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed);
                iemNativeRegFlushGuestShadows(pReNative, fToFreeMask);
                Assert(fRegs == (~pReNative->Core.bmHstRegs & fRegMask)); /* this shall not change. */

                /* See if we've got any unshadowed registers we can return now. */
                uint32_t const fUnshadowedRegs = fRegs & ~pReNative->Core.bmHstRegsWithGstShadow;
                if (fUnshadowedRegs)
                {
                    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped);
                    return (fPreferVolatile
                            ? ASMBitFirstSetU32(fUnshadowedRegs)
                            : ASMBitLastSetU32(  fUnshadowedRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
                                               ? fUnshadowedRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fUnshadowedRegs))
                         - 1;
                }
            }
        }
#endif /* IEMNATIVE_WITH_LIVENESS_ANALYSIS */

        unsigned const idxReg = (fPreferVolatile
                                 ? ASMBitFirstSetU32(fRegs)
                                 : ASMBitLastSetU32(  fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
                                                    ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs))
                              - 1;

        Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0);
        Assert(   (pReNative->Core.aHstRegs[idxReg].fGstRegShadows & pReNative->Core.bmGstRegShadows)
               == pReNative->Core.aHstRegs[idxReg].fGstRegShadows);
        Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg));

#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
        /* We need to flush any pending guest register writes this host register shadows. */
        *poff = iemNativeRegFlushDirtyGuestByHostRegShadow(pReNative, *poff, idxReg);
#endif

        pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
        pReNative->Core.bmGstRegShadows        &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
        pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
        return idxReg;
    }

    /*
     * Try to free up a variable that's in a register.
     *
     * We do two rounds here, first evacuating variables we don't need to be
     * saved on the stack, then in the second round move things to the stack.
     */
    STAM_REL_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeVar);
    for (uint32_t iLoop = 0; iLoop < 2; iLoop++)
    {
        uint32_t fVars = pReNative->Core.bmVars;
        while (fVars)
        {
            uint32_t const idxVar = ASMBitFirstSetU32(fVars) - 1;
            fVars &= ~RT_BIT_32(idxVar); /* Clear the bit up front so the SIMD skip below cannot loop forever. */
            uint8_t const  idxReg = pReNative->Core.aVars[idxVar].idxReg;
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
            if (pReNative->Core.aVars[idxVar].fSimdReg) /* Need to ignore SIMD variables here or we end up freeing random registers. */
                continue;
#endif

            if (   idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs)
                && (RT_BIT_32(idxReg) & fRegMask)
                && (  iLoop == 0
                    ? pReNative->Core.aVars[idxVar].enmKind != kIemNativeVarKind_Stack
                    : pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Stack)
                && !pReNative->Core.aVars[idxVar].fRegAcquired)
            {
                Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxReg));
                Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxReg].fGstRegShadows)
                       == pReNative->Core.aHstRegs[idxReg].fGstRegShadows);
                Assert(pReNative->Core.bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
                Assert(   RT_BOOL(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg))
                       == RT_BOOL(pReNative->Core.aHstRegs[idxReg].fGstRegShadows));
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
                Assert(!(pReNative->Core.aHstRegs[idxReg].fGstRegShadows & pReNative->Core.bmGstRegShadowDirty));
#endif

                if (pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Stack)
                {
                    uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, IEMNATIVE_VAR_IDX_PACK(idxVar));
                    *poff = iemNativeEmitStoreGprByBp(pReNative, *poff, iemNativeStackCalcBpDisp(idxStackSlot), idxReg);
                }

                pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
                pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxReg);

                pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
                pReNative->Core.bmGstRegShadows        &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
                pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
                return idxReg;
            }
        }
    }

    return UINT8_MAX;
}


/**
 * Reassigns a variable to a different register specified by the caller.
 *
 * @returns The new code buffer position.
 * @param   pReNative   The native recompile state.
 * @param   off         The current code buffer position.
 * @param   idxVar      The variable index.
 * @param   idxRegOld   The old host register number.
 * @param   idxRegNew   The new host register number.
 * @param   pszCaller   The caller for logging.
 */
static uint32_t iemNativeRegMoveVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar,
                                    uint8_t idxRegOld, uint8_t idxRegNew, const char *pszCaller)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxRegOld);
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    Assert(!pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg);
#endif
    RT_NOREF(pszCaller);

#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    Assert(!(pReNative->Core.aHstRegs[idxRegNew].fGstRegShadows & pReNative->Core.bmGstRegShadowDirty));
#endif
    iemNativeRegClearGstRegShadowing(pReNative, idxRegNew, off);

    uint64_t fGstRegShadows = pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    Assert(!(fGstRegShadows & pReNative->Core.bmGstRegShadowDirty));
#endif
    Log12(("%s: moving idxVar=%#x from %s to %s (fGstRegShadows=%RX64)\n",
           pszCaller, idxVar, g_apszIemNativeHstRegNames[idxRegOld], g_apszIemNativeHstRegNames[idxRegNew], fGstRegShadows));
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegNew, idxRegOld);

    pReNative->Core.aHstRegs[idxRegNew].fGstRegShadows = fGstRegShadows;
    pReNative->Core.aHstRegs[idxRegNew].enmWhat        = kIemNativeWhat_Var;
    pReNative->Core.aHstRegs[idxRegNew].idxVar         = idxVar;
    if (fGstRegShadows)
    {
        pReNative->Core.bmHstRegsWithGstShadow = (pReNative->Core.bmHstRegsWithGstShadow & ~RT_BIT_32(idxRegOld))
                                               | RT_BIT_32(idxRegNew);
        while (fGstRegShadows)
        {
            unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
            fGstRegShadows &= ~RT_BIT_64(idxGstReg);

            Assert(pReNative->Core.aidxGstRegShadows[idxGstReg] == idxRegOld);
            pReNative->Core.aidxGstRegShadows[idxGstReg] = idxRegNew;
        }
    }

    pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg = (uint8_t)idxRegNew;
    pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows = 0;
    pReNative->Core.bmHstRegs = RT_BIT_32(idxRegNew) | (pReNative->Core.bmHstRegs & ~RT_BIT_32(idxRegOld));
    return off;
}


/**
 * Moves a variable to a different register or spills it onto the stack.
 *
 * This must be a stack variable (kIemNativeVarKind_Stack) because the other
 * kinds can easily be recreated if needed later.
 *
 * @returns The new code buffer position.
 * @param   pReNative       The native recompile state.
 * @param   off             The current code buffer position.
 * @param   idxVar          The variable index.
 * @param   fForbiddenRegs  Mask of the forbidden registers.  Defaults to
 *                          call-volatile registers.
 */
DECL_HIDDEN_THROW(uint32_t) iemNativeRegMoveOrSpillStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar,
                                                            uint32_t fForbiddenRegs /*= IEMNATIVE_CALL_VOLATILE_GREG_MASK*/)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
    Assert(pVar->enmKind == kIemNativeVarKind_Stack);
    Assert(!pVar->fRegAcquired);

    uint8_t const idxRegOld = pVar->idxReg;
    Assert(idxRegOld < RT_ELEMENTS(pReNative->Core.aHstRegs));
    Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxRegOld));
    Assert(pReNative->Core.aHstRegs[idxRegOld].enmWhat == kIemNativeWhat_Var);
    Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows)
           == pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows);
    Assert(pReNative->Core.bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
    Assert(   RT_BOOL(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxRegOld))
           == RT_BOOL(pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows));
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    Assert(!(pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows & pReNative->Core.bmGstRegShadowDirty));
#endif

    /** @todo Add statistics on this. */
    /** @todo Implement basic variable liveness analysis (python) so variables
     * can be freed immediately once no longer used.  Without it we risk
     * trashing registers and stack slots on dead variables.
     * Update: This is mostly done. (Not IEMNATIVE_WITH_LIVENESS_ANALYSIS.) */

    /*
     * First try to move it to a different register, as that's cheaper.
     */
    fForbiddenRegs |= RT_BIT_32(idxRegOld);
    fForbiddenRegs |= IEMNATIVE_REG_FIXED_MASK;
    uint32_t fRegs = ~pReNative->Core.bmHstRegs & ~fForbiddenRegs;
    if (fRegs)
    {
        /* Avoid using shadow registers, if possible. */
        if (fRegs & ~pReNative->Core.bmHstRegsWithGstShadow)
            fRegs &= ~pReNative->Core.bmHstRegsWithGstShadow;
        unsigned const idxRegNew = ASMBitFirstSetU32(fRegs) - 1;
        return iemNativeRegMoveVar(pReNative, off, idxVar, idxRegOld, idxRegNew, "iemNativeRegMoveOrSpillStackVar");
    }

    /*
     * Otherwise we must spill the register onto the stack.
     */
    uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
    Log12(("iemNativeRegMoveOrSpillStackVar: spilling idxVar=%#x/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
           idxVar, idxRegOld, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
    off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);

    pVar->idxReg                            = UINT8_MAX;
    pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
    pReNative->Core.bmHstRegs              &= ~RT_BIT_32(idxRegOld);
    pReNative->Core.bmGstRegShadows        &= ~pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
    pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows = 0;
    return off;
}


/**
 * Allocates a temporary host general purpose register.
 *
 * This may emit code to save register content onto the stack in order to free
 * up a register.
 *
 * @returns The host register number; throws VBox status code on failure,
 *          so no need to check the return value.
 * @param   pReNative       The native recompile state.
 * @param   poff            Pointer to the variable with the code buffer position.
 *                          This will be updated if we need to move a variable
 *                          from register to stack in order to satisfy the
 *                          request.
 * @param   fPreferVolatile Whether to prefer volatile over non-volatile
 *                          registers (@c true, default) or the other way around
 *                          (@c false, for iemNativeRegAllocTmpForGuestReg()).
 */
DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile /*= true*/)
{
    /*
     * Try to find a completely unused register, preferably a call-volatile one.
     */
    uint8_t  idxReg;
    uint32_t fRegs = ~pReNative->Core.bmHstRegs
                   & ~pReNative->Core.bmHstRegsWithGstShadow
                   & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK);
    if (fRegs)
    {
        if (fPreferVolatile)
            idxReg = (uint8_t)ASMBitFirstSetU32(  fRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK
                                                ? fRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1;
        else
            idxReg = (uint8_t)ASMBitFirstSetU32(  fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
                                                ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1;
        Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
        Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
        Log12(("iemNativeRegAllocTmp: %s\n", g_apszIemNativeHstRegNames[idxReg]));
    }
    else
    {
        idxReg = iemNativeRegAllocFindFree(pReNative, poff, fPreferVolatile);
        AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP));
        Log12(("iemNativeRegAllocTmp: %s (slow)\n", g_apszIemNativeHstRegNames[idxReg]));
    }
    return iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Tmp);
}
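
/*
 * Illustrative only (not part of the original source): the usual pattern is
 * to allocate a temporary, emit the code that uses it, and release it again
 * so later allocations can reuse the register.  A sketch, assuming the
 * matching iemNativeRegFreeTmp() release function:
 *
 * @code
 *      uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
 *      off = iemNativeEmitLoadGprImm64(pReNative, off, idxTmpReg, 0);
 *      iemNativeRegFreeTmp(pReNative, idxTmpReg);
 * @endcode
 */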


/**
 * Alternative version of iemNativeRegAllocTmp that takes mask with acceptable
 * registers.
 *
 * @returns The host register number; throws VBox status code on failure,
 *          so no need to check the return value.
 * @param   pReNative       The native recompile state.
 * @param   poff            Pointer to the variable with the code buffer position.
 *                          This will be updated if we need to move a variable
 *                          from register to stack in order to satisfy the
 *                          request.
 * @param   fRegMask        Mask of acceptable registers.
 * @param   fPreferVolatile Whether to prefer volatile over non-volatile
 *                          registers (@c true, default) or the other way around
 *                          (@c false, for iemNativeRegAllocTmpForGuestReg()).
 */
DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpEx(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint32_t fRegMask,
                                                  bool fPreferVolatile /*= true*/)
{
    Assert(!(fRegMask & ~IEMNATIVE_HST_GREG_MASK));
    Assert(!(fRegMask & IEMNATIVE_REG_FIXED_MASK));

    /*
     * Try to find a completely unused register, preferably a call-volatile one.
     */
    uint8_t  idxReg;
    uint32_t fRegs = ~pReNative->Core.bmHstRegs
                   & ~pReNative->Core.bmHstRegsWithGstShadow
                   & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK)
                   & fRegMask;
    if (fRegs)
    {
        if (fPreferVolatile)
            idxReg = (uint8_t)ASMBitFirstSetU32(  fRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK
                                                ? fRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1;
        else
            idxReg = (uint8_t)ASMBitFirstSetU32(  fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
                                                ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1;
        Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
        Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
        Log12(("iemNativeRegAllocTmpEx: %s\n", g_apszIemNativeHstRegNames[idxReg]));
    }
    else
    {
        idxReg = iemNativeRegAllocFindFree(pReNative, poff, fPreferVolatile, fRegMask);
        AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP));
        Log12(("iemNativeRegAllocTmpEx: %s (slow)\n", g_apszIemNativeHstRegNames[idxReg]));
    }
    return iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Tmp);
}
3542 |
|
---|
3543 |
|
---|
3544 | /**
|
---|
3545 | * Allocates a temporary register for loading an immediate value into.
|
---|
3546 | *
|
---|
3547 | * This will emit code to load the immediate, unless there happens to be an
|
---|
3548 | * unused register with the value already loaded.
|
---|
3549 | *
|
---|
3550 | * The caller will not modify the returned register, it must be considered
|
---|
3551 | * read-only. Free using iemNativeRegFreeTmpImm.
|
---|
3552 | *
|
---|
3553 | * @returns The host register number; throws VBox status code on failure, so no
|
---|
3554 | * need to check the return value.
|
---|
3555 | * @param pReNative The native recompile state.
|
---|
3556 | * @param poff Pointer to the variable with the code buffer position.
|
---|
3557 | * @param uImm The immediate value that the register must hold upon
|
---|
3558 | * return.
|
---|
3559 | * @param fPreferVolatile Whether to prefer volatile over non-volatile
|
---|
3560 | * registers (@c true, default) or the other way around
|
---|
3561 | * (@c false).
|
---|
3562 | *
|
---|
3563 | * @note Reusing immediate values has not been implemented yet.
|
---|
3564 | */
|
---|
3565 | DECL_HIDDEN_THROW(uint8_t)
|
---|
3566 | iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm, bool fPreferVolatile /*= true*/)
|
---|
3567 | {
|
---|
3568 | uint8_t const idxReg = iemNativeRegAllocTmp(pReNative, poff, fPreferVolatile);
|
---|
3569 | *poff = iemNativeEmitLoadGprImm64(pReNative, *poff, idxReg, uImm);
|
---|
3570 | return idxReg;
|
---|
3571 | }
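
/* Illustrative sketch only: the alloc/free pairing for immediate registers.
   The register must be treated as read-only between the two calls; the add
   emitter used here is an assumption, not taken from this file. */
#if 0
static uint32_t iemNativeExampleUseTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegDst)
{
    uint8_t const idxRegImm = iemNativeRegAllocTmpImm(pReNative, &off, UINT64_C(0x1000));
    off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegDst, idxRegImm); /* assumed emitter */
    iemNativeRegFreeTmpImm(pReNative, idxRegImm);
    return off;
}
#endif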


/**
 * Allocates a temporary host general purpose register for keeping a guest
 * register value.
 *
 * Since we may already have a register holding the guest register value,
 * code will be emitted to do the loading if that's not the case.  Code may
 * also be emitted if we have to free up a register to satisfy the request.
 *
 * @returns The host register number; throws VBox status code on failure, so no
 *          need to check the return value.
 * @param   pReNative       The native recompile state.
 * @param   poff            Pointer to the variable with the code buffer
 *                          position.  This will be updated if we need to move
 *                          a variable from register to stack in order to
 *                          satisfy the request.
 * @param   enmGstReg       The guest register that is to be updated.
 * @param   enmIntendedUse  How the caller will be using the host register.
 * @param   fNoVolatileRegs Set if no volatile register is allowed, clear if
 *                          any register is okay (default).  The ASSUMPTION
 *                          here is that the caller has already flushed all
 *                          volatile registers, so this is only applied if we
 *                          allocate a new register.
 * @param   fSkipLivenessAssert Hack for liveness input validation of EFLAGS.
 * @sa      iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
 */
DECL_HIDDEN_THROW(uint8_t)
iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg,
                                IEMNATIVEGSTREGUSE enmIntendedUse /*= kIemNativeGstRegUse_ReadOnly*/,
                                bool fNoVolatileRegs /*= false*/, bool fSkipLivenessAssert /*= false*/)
{
    Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
    AssertMsg(   fSkipLivenessAssert
              || pReNative->idxCurCall == 0
              || enmGstReg == kIemNativeGstReg_Pc
              || (enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
                  ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
                  : enmIntendedUse == kIemNativeGstRegUse_ForUpdate
                  ? IEMLIVENESS_STATE_IS_MODIFY_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
                  : IEMLIVENESS_STATE_IS_INPUT_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)) ),
              ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
#endif
    RT_NOREF(fSkipLivenessAssert);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
    static const char * const s_pszIntendedUse[] = { "fetch", "update", "full write", "destructive calc" };
#endif
    uint32_t const fRegMask = !fNoVolatileRegs
                            ? IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK
                            : IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK;

    /*
     * First check if the guest register value is already in a host register.
     */
    if (pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg))
    {
        uint8_t idxReg = pReNative->Core.aidxGstRegShadows[enmGstReg];
        Assert(idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
        Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows & RT_BIT_64(enmGstReg));
        Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg));

        /* It's not supposed to be allocated... */
        if (!(pReNative->Core.bmHstRegs & RT_BIT_32(idxReg)))
        {
            /*
             * If the register will trash the guest shadow copy, try to find a
             * completely unused register we can use instead.  If that fails,
             * we need to disassociate the host reg from the guest reg.
             */
            /** @todo would be nice to know if preserving the register is in any way helpful. */
            /* If the purpose is calculations, try to duplicate the register value as
               we'll be clobbering the shadow. */
            if (   enmIntendedUse == kIemNativeGstRegUse_Calculation
                && (   ~pReNative->Core.bmHstRegs
                     & ~pReNative->Core.bmHstRegsWithGstShadow
                     & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK)))
            {
                uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask);

                *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);

                Log12(("iemNativeRegAllocTmpForGuestReg: Duplicated %s for guest %s into %s for destructive calc\n",
                       g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName,
                       g_apszIemNativeHstRegNames[idxRegNew]));
                idxReg = idxRegNew;
            }
            /* If the current register matches the restrictions, go ahead and allocate
               it for the caller. */
            else if (fRegMask & RT_BIT_32(idxReg))
            {
                pReNative->Core.bmHstRegs |= RT_BIT_32(idxReg);
                pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Tmp;
                pReNative->Core.aHstRegs[idxReg].idxVar  = UINT8_MAX;
                if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
                    Log12(("iemNativeRegAllocTmpForGuestReg: Reusing %s for guest %s %s\n",
                           g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[enmIntendedUse]));
                else
                {
                    iemNativeRegClearGstRegShadowing(pReNative, idxReg, *poff);
                    Log12(("iemNativeRegAllocTmpForGuestReg: Grabbing %s for guest %s - destructive calc\n",
                           g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName));
                }
            }
            /* Otherwise, allocate a register that satisfies the caller and transfer
               the shadowing if compatible with the intended use.  (This basically
               means the caller wants a non-volatile register (RSP push/pop scenario).) */
            else
            {
                Assert(fNoVolatileRegs);
                uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask & ~RT_BIT_32(idxReg),
                                                                 !fNoVolatileRegs
                                                                 && enmIntendedUse == kIemNativeGstRegUse_Calculation);
                *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);
                if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
                {
                    iemNativeRegTransferGstRegShadowing(pReNative, idxReg, idxRegNew, enmGstReg, *poff);
                    Log12(("iemNativeRegAllocTmpForGuestReg: Transferring %s to %s for guest %s %s\n",
                           g_apszIemNativeHstRegNames[idxReg], g_apszIemNativeHstRegNames[idxRegNew],
                           g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[enmIntendedUse]));
                }
                else
                    Log12(("iemNativeRegAllocTmpForGuestReg: Duplicated %s for guest %s into %s for destructive calc\n",
                           g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName,
                           g_apszIemNativeHstRegNames[idxRegNew]));
                idxReg = idxRegNew;
            }
        }
        else
        {
            /*
             * Oops. Shadowed guest register already allocated!
             *
             * Allocate a new register, copy the value and, if updating, the
             * guest shadow copy assignment to the new register.
             */
            AssertMsg(   enmIntendedUse != kIemNativeGstRegUse_ForUpdate
                      && enmIntendedUse != kIemNativeGstRegUse_ForFullWrite,
                      ("This shouldn't happen: idxReg=%d enmGstReg=%d enmIntendedUse=%s\n",
                       idxReg, enmGstReg, s_pszIntendedUse[enmIntendedUse]));

            /** @todo share register for readonly access. */
            uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask,
                                                             enmIntendedUse == kIemNativeGstRegUse_Calculation);

            if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
                *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);

            if (   enmIntendedUse != kIemNativeGstRegUse_ForUpdate
                && enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
                Log12(("iemNativeRegAllocTmpForGuestReg: Duplicated %s for guest %s into %s for %s\n",
                       g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName,
                       g_apszIemNativeHstRegNames[idxRegNew], s_pszIntendedUse[enmIntendedUse]));
            else
            {
                iemNativeRegTransferGstRegShadowing(pReNative, idxReg, idxRegNew, enmGstReg, *poff);
                Log12(("iemNativeRegAllocTmpForGuestReg: Moved %s for guest %s into %s for %s\n",
                       g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName,
                       g_apszIemNativeHstRegNames[idxRegNew], s_pszIntendedUse[enmIntendedUse]));
            }
            idxReg = idxRegNew;
        }
        Assert(RT_BIT_32(idxReg) & fRegMask); /* See assumption in fNoVolatileRegs docs. */

#ifdef VBOX_STRICT
        /* Strict builds: Check that the value is correct. */
        *poff = iemNativeEmitGuestRegValueCheck(pReNative, *poff, idxReg, enmGstReg);
#endif

#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
        /** @todo r=aeichner Implement for registers other than GPR as well. */
        if (   (   enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
                || enmIntendedUse == kIemNativeGstRegUse_ForUpdate)
            && (   (   enmGstReg >= kIemNativeGstReg_GprFirst
                    && enmGstReg <= kIemNativeGstReg_GprLast)
                || enmGstReg == kIemNativeGstReg_MxCsr))
        {
# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
            iemNativeDbgInfoAddNativeOffset(pReNative, *poff);
            iemNativeDbgInfoAddGuestRegDirty(pReNative, false /*fSimdReg*/, enmGstReg, idxReg);
# endif
            pReNative->Core.bmGstRegShadowDirty |= RT_BIT_64(enmGstReg);
        }
#endif

        return idxReg;
    }

    /*
     * Allocate a new register, load it with the guest value and designate it
     * as a copy of the guest register.
     */
    uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask, enmIntendedUse == kIemNativeGstRegUse_Calculation);

    if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
        *poff = iemNativeEmitLoadGprWithGstShadowReg(pReNative, *poff, idxRegNew, enmGstReg);

    if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
        iemNativeRegMarkAsGstRegShadow(pReNative, idxRegNew, enmGstReg, *poff);
    Log12(("iemNativeRegAllocTmpForGuestReg: Allocated %s for guest %s %s\n",
           g_apszIemNativeHstRegNames[idxRegNew], g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[enmIntendedUse]));

#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    /** @todo r=aeichner Implement for registers other than GPR as well. */
    if (   (   enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
            || enmIntendedUse == kIemNativeGstRegUse_ForUpdate)
        && (   (   enmGstReg >= kIemNativeGstReg_GprFirst
                && enmGstReg <= kIemNativeGstReg_GprLast)
            || enmGstReg == kIemNativeGstReg_MxCsr))
    {
# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
        iemNativeDbgInfoAddNativeOffset(pReNative, *poff);
        iemNativeDbgInfoAddGuestRegDirty(pReNative, false /*fSimdReg*/, enmGstReg, idxRegNew);
# endif
        pReNative->Core.bmGstRegShadowDirty |= RT_BIT_64(enmGstReg);
    }
#endif

    return idxRegNew;
}
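
/* Illustrative sketch only: a typical read-modify-write use of the allocator,
   here incrementing a guest GPR.  Forming the enum value from
   kIemNativeGstReg_GprFirst + X86_GREG_xCX follows the convention seen above;
   the add emitter is an assumption. */
#if 0
static uint32_t iemNativeExampleIncGstRcx(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /* ForUpdate loads the current value (if not already shadowed) and, with
       delayed writeback enabled, marks the shadow dirty so it gets written
       back to CPUMCTX later. */
    uint8_t const idxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
                                                           (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + X86_GREG_xCX),
                                                           kIemNativeGstRegUse_ForUpdate);
    off = iemNativeEmitAddGprImm8(pReNative, off, idxReg, 1); /* assumed emitter */
    iemNativeRegFreeTmp(pReNative, idxReg);
    return off;
}
#endif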


/**
 * Allocates a temporary host general purpose register that already holds the
 * given guest register value.
 *
 * The use case for this function is places where the shadowing state cannot be
 * modified due to branching and such.  This will fail if we don't have a
 * current shadow copy handy or if it's incompatible.  The only code that will
 * be emitted here is value checking code in strict builds.
 *
 * The intended use can only be readonly!
 *
 * @returns The host register number, UINT8_MAX if not present.
 * @param   pReNative   The native recompile state.
 * @param   poff        Pointer to the instruction buffer offset.
 *                      Will be updated in strict builds if a register is
 *                      found.
 * @param   enmGstReg   The guest register that is to be fetched.
 * @note    In strict builds, this may throw instruction buffer growth failures.
 *          Non-strict builds will not throw anything.
 * @sa      iemNativeRegAllocTmpForGuestReg
 */
DECL_HIDDEN_THROW(uint8_t)
iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
{
    Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
    AssertMsg(   pReNative->idxCurCall == 0
              || IEMLIVENESS_STATE_IS_INPUT_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
              || enmGstReg == kIemNativeGstReg_Pc,
              ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
#endif

    /*
     * First check if the guest register value is already in a host register.
     */
    if (pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg))
    {
        uint8_t idxReg = pReNative->Core.aidxGstRegShadows[enmGstReg];
        Assert(idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
        Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows & RT_BIT_64(enmGstReg));
        Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg));

        if (!(pReNative->Core.bmHstRegs & RT_BIT_32(idxReg)))
        {
            /*
             * We only do readonly use here, so easy compared to the other
             * variant of this code.
             */
            pReNative->Core.bmHstRegs               |= RT_BIT_32(idxReg);
            pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Tmp;
            pReNative->Core.aHstRegs[idxReg].idxVar  = UINT8_MAX;
            Log12(("iemNativeRegAllocTmpForGuestRegIfAlreadyPresent: Reusing %s for guest %s readonly\n",
                   g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName));

#ifdef VBOX_STRICT
            /* Strict builds: Check that the value is correct. */
            *poff = iemNativeEmitGuestRegValueCheck(pReNative, *poff, idxReg, enmGstReg);
#else
            RT_NOREF(poff);
#endif
            return idxReg;
        }
    }

    return UINT8_MAX;
}
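
/* Illustrative sketch only: the try-then-fall-back pattern, where the shadow
   copy is used if handy and the full allocator is used otherwise (only valid
   on paths where changing the shadowing state is actually permissible). */
#if 0
static uint32_t iemNativeExamplePeekGstRax(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t *pidxReg)
{
    IEMNATIVEGSTREG const enmGstReg = (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + X86_GREG_xAX);
    uint8_t idxReg = iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(pReNative, &off, enmGstReg);
    if (idxReg == UINT8_MAX) /* No usable shadow copy around. */
        idxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, enmGstReg, kIemNativeGstRegUse_ReadOnly);
    *pidxReg = idxReg;
    return off;
}
#endif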


/**
 * Allocates argument registers for a function call.
 *
 * @returns New code buffer offset on success; throws VBox status code on
 *          failure, so no need to check the return value.
 * @param   pReNative   The native recompile state.
 * @param   off         The current code buffer offset.
 * @param   cArgs       The number of arguments the function call takes.
 */
DECL_HIDDEN_THROW(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs)
{
    AssertStmt(cArgs <= IEMNATIVE_CALL_ARG_GREG_COUNT + IEMNATIVE_FRAME_STACK_ARG_COUNT,
               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_4));
    Assert(RT_ELEMENTS(g_aidxIemNativeCallRegs) == IEMNATIVE_CALL_ARG_GREG_COUNT);
    Assert(RT_ELEMENTS(g_afIemNativeCallRegs)   == IEMNATIVE_CALL_ARG_GREG_COUNT);

    if (cArgs > RT_ELEMENTS(g_aidxIemNativeCallRegs))
        cArgs = RT_ELEMENTS(g_aidxIemNativeCallRegs);
    else if (cArgs == 0)
        return off;

    /*
     * Do we get lucky and all registers are free and not shadowing anything?
     */
    if (((pReNative->Core.bmHstRegs | pReNative->Core.bmHstRegsWithGstShadow) & g_afIemNativeCallRegs[cArgs]) == 0)
        for (uint32_t i = 0; i < cArgs; i++)
        {
            uint8_t const idxReg = g_aidxIemNativeCallRegs[i];
            pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Arg;
            pReNative->Core.aHstRegs[idxReg].idxVar  = UINT8_MAX;
            Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
        }
    /*
     * Okay, not lucky so we have to free up the registers.
     */
    else
        for (uint32_t i = 0; i < cArgs; i++)
        {
            uint8_t const idxReg = g_aidxIemNativeCallRegs[i];
            if (pReNative->Core.bmHstRegs & RT_BIT_32(idxReg))
            {
                switch (pReNative->Core.aHstRegs[idxReg].enmWhat)
                {
                    case kIemNativeWhat_Var:
                    {
                        uint8_t const idxVar = pReNative->Core.aHstRegs[idxReg].idxVar;
                        IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
                        AssertStmt(IEMNATIVE_VAR_IDX_UNPACK(idxVar) < RT_ELEMENTS(pReNative->Core.aVars),
                                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_5));
                        Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxReg);
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
                        Assert(!pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg);
#endif

                        if (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].enmKind != kIemNativeVarKind_Stack)
                            pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg = UINT8_MAX;
                        else
                        {
                            off = iemNativeRegMoveOrSpillStackVar(pReNative, off, idxVar);
                            Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
                        }
                        break;
                    }

                    case kIemNativeWhat_Tmp:
                    case kIemNativeWhat_Arg:
                    case kIemNativeWhat_rc:
                        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_5));
                    default:
                        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_6));
                }

            }
            if (pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg))
            {
                Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0);
                Assert(   (pReNative->Core.aHstRegs[idxReg].fGstRegShadows & pReNative->Core.bmGstRegShadows)
                       == pReNative->Core.aHstRegs[idxReg].fGstRegShadows);
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
                Assert(!(pReNative->Core.aHstRegs[idxReg].fGstRegShadows & pReNative->Core.bmGstRegShadowDirty));
#endif
                pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
                pReNative->Core.bmGstRegShadows        &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
                pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
            }
            else
                Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
            pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Arg;
            pReNative->Core.aHstRegs[idxReg].idxVar  = UINT8_MAX;
        }
    pReNative->Core.bmHstRegs |= g_afIemNativeCallRegs[cArgs];
    return off;
}
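
/* Illustrative sketch only: reserving the argument registers and loading them
   before a helper call.  The helper itself is hypothetical; the load and call
   emitters are assumed to be the usual ones from the emit header. */
#if 0
static uint32_t iemNativeExampleEmitTwoArgCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegValue)
{
    off = iemNativeRegAllocArgs(pReNative, off, 2);
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxRegValue);
    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeExampleHlpTwoArgs); /* hypothetical helper */
    return off;
}
#endif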


DECL_HIDDEN_THROW(uint8_t) iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg);


#if 0
/**
 * Frees a register assignment of any type.
 *
 * @param   pReNative   The native recompile state.
 * @param   idxHstReg   The register to free.
 *
 * @note    Does not update variables.
 */
DECLHIDDEN(void) iemNativeRegFree(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT
{
    Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
    Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg));
    Assert(!(IEMNATIVE_REG_FIXED_MASK & RT_BIT_32(idxHstReg)));
    Assert(   pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Var
           || pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Tmp
           || pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Arg
           || pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_rc);
    Assert(   pReNative->Core.aHstRegs[idxHstReg].enmWhat != kIemNativeWhat_Var
           || pReNative->Core.aVars[pReNative->Core.aHstRegs[idxHstReg].idxVar].idxReg == UINT8_MAX
           || (pReNative->Core.bmVars & RT_BIT_32(pReNative->Core.aHstRegs[idxHstReg].idxVar)));
    Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows)
           == pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows);
    Assert(   RT_BOOL(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg))
           == RT_BOOL(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));

    pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
    /* no flushing, right:
    pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
    pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
    pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
    */
}
#endif


/**
 * Frees a temporary register.
 *
 * Any shadow copies of guest registers assigned to the host register will not
 * be flushed by this operation.
 */
DECLHIDDEN(void) iemNativeRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT
{
    Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg));
    Assert(pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Tmp);
    pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
    Log12(("iemNativeRegFreeTmp: %s (gst: %#RX64)\n",
           g_apszIemNativeHstRegNames[idxHstReg], pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));
}


/**
 * Frees a temporary immediate register.
 *
 * It is assumed that the caller has not modified the register, so it still
 * holds the same value as when it was allocated via iemNativeRegAllocTmpImm().
 */
DECLHIDDEN(void) iemNativeRegFreeTmpImm(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT
{
    iemNativeRegFreeTmp(pReNative, idxHstReg);
}


/**
 * Frees a register assigned to a variable.
 *
 * The register will be disassociated from the variable.
 */
DECLHIDDEN(void) iemNativeRegFreeVar(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, bool fFlushShadows) RT_NOEXCEPT
{
    Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg));
    Assert(pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Var);
    uint8_t const idxVar = pReNative->Core.aHstRegs[idxHstReg].idxVar;
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg);
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    Assert(!pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg);
#endif

    pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg = UINT8_MAX;
    pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
    if (!fFlushShadows)
        Log12(("iemNativeRegFreeVar: %s (gst: %#RX64) idxVar=%#x\n",
               g_apszIemNativeHstRegNames[idxHstReg], pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows, idxVar));
    else
    {
        pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
        uint64_t const fGstRegShadowsOld = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
        Assert(!(pReNative->Core.bmGstRegShadowDirty & fGstRegShadowsOld));
#endif
        pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
        pReNative->Core.bmGstRegShadows &= ~fGstRegShadowsOld;
        uint64_t fGstRegShadows = fGstRegShadowsOld;
        while (fGstRegShadows)
        {
            unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
            fGstRegShadows &= ~RT_BIT_64(idxGstReg);

            Assert(pReNative->Core.aidxGstRegShadows[idxGstReg] == idxHstReg);
            pReNative->Core.aidxGstRegShadows[idxGstReg] = UINT8_MAX;
        }
        Log12(("iemNativeRegFreeVar: %s (gst: %#RX64 -> 0) idxVar=%#x\n",
               g_apszIemNativeHstRegNames[idxHstReg], fGstRegShadowsOld, idxVar));
    }
}


#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
# if defined(LOG_ENABLED) || defined(IEMNATIVE_WITH_TB_DEBUG_INFO)
/** Host CPU SIMD register names. */
DECL_HIDDEN_CONST(const char * const) g_apszIemNativeHstSimdRegNames[] =
{
# ifdef RT_ARCH_AMD64
    "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"
# elif defined(RT_ARCH_ARM64)
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
# else
#  error "port me"
# endif
};
# endif


/**
 * Frees a SIMD register assigned to a variable.
 *
 * The register will be disassociated from the variable.
 */
DECLHIDDEN(void) iemNativeSimdRegFreeVar(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, bool fFlushShadows) RT_NOEXCEPT
{
    Assert(pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxHstReg));
    Assert(pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_Var);
    uint8_t const idxVar = pReNative->Core.aHstSimdRegs[idxHstReg].idxVar;
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg);
    Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg);

    pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg = UINT8_MAX;
    pReNative->Core.bmHstSimdRegs &= ~RT_BIT_32(idxHstReg);
    if (!fFlushShadows)
        Log12(("iemNativeSimdRegFreeVar: %s (gst: %#RX64) idxVar=%#x\n",
               g_apszIemNativeHstSimdRegNames[idxHstReg], pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows, idxVar));
    else
    {
        pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
        uint64_t const fGstRegShadowsOld = pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows;
        pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows = 0;
        pReNative->Core.bmGstSimdRegShadows &= ~fGstRegShadowsOld;
        uint64_t fGstRegShadows = fGstRegShadowsOld;
        while (fGstRegShadows)
        {
            unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
            fGstRegShadows &= ~RT_BIT_64(idxGstReg);

            Assert(pReNative->Core.aidxGstSimdRegShadows[idxGstReg] == idxHstReg);
            pReNative->Core.aidxGstSimdRegShadows[idxGstReg] = UINT8_MAX;
        }
        Log12(("iemNativeSimdRegFreeVar: %s (gst: %#RX64 -> 0) idxVar=%#x\n",
               g_apszIemNativeHstSimdRegNames[idxHstReg], fGstRegShadowsOld, idxVar));
    }
}


/**
 * Reassigns a variable to a different SIMD register specified by the caller.
 *
 * @returns The new code buffer position.
 * @param   pReNative   The native recompile state.
 * @param   off         The current code buffer position.
 * @param   idxVar      The variable index.
 * @param   idxRegOld   The old host register number.
 * @param   idxRegNew   The new host register number.
 * @param   pszCaller   The caller for logging.
 */
static uint32_t iemNativeSimdRegMoveVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar,
                                        uint8_t idxRegOld, uint8_t idxRegNew, const char *pszCaller)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxRegOld);
    Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg);
    RT_NOREF(pszCaller);

    Assert(!(  (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
             & pReNative->Core.aHstSimdRegs[idxRegNew].fGstRegShadows));
    iemNativeSimdRegClearGstSimdRegShadowing(pReNative, idxRegNew, off);

    uint64_t fGstRegShadows = pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows;
    Assert(!(  (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
             & pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows));

    Log12(("%s: moving idxVar=%#x from %s to %s (fGstRegShadows=%RX64)\n",
           pszCaller, idxVar, g_apszIemNativeHstSimdRegNames[idxRegOld], g_apszIemNativeHstSimdRegNames[idxRegNew], fGstRegShadows));

    if (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar == sizeof(RTUINT128U))
        off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, idxRegNew, idxRegOld);
    else
    {
        Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar == sizeof(RTUINT256U));
        off = iemNativeEmitSimdLoadVecRegFromVecRegU256(pReNative, off, idxRegNew, idxRegOld);
    }

    pReNative->Core.aHstSimdRegs[idxRegNew].fGstRegShadows = fGstRegShadows;
    pReNative->Core.aHstSimdRegs[idxRegNew].enmWhat        = kIemNativeWhat_Var;
    pReNative->Core.aHstSimdRegs[idxRegNew].idxVar         = idxVar;
    if (fGstRegShadows)
    {
        pReNative->Core.bmHstSimdRegsWithGstShadow = (pReNative->Core.bmHstSimdRegsWithGstShadow & ~RT_BIT_32(idxRegOld))
                                                   | RT_BIT_32(idxRegNew);
        while (fGstRegShadows)
        {
            unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
            fGstRegShadows &= ~RT_BIT_64(idxGstReg);

            Assert(pReNative->Core.aidxGstSimdRegShadows[idxGstReg] == idxRegOld);
            pReNative->Core.aidxGstSimdRegShadows[idxGstReg] = idxRegNew;
        }
    }

    pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg = (uint8_t)idxRegNew;
    pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows = 0;
    pReNative->Core.bmHstSimdRegs = RT_BIT_32(idxRegNew) | (pReNative->Core.bmHstSimdRegs & ~RT_BIT_32(idxRegOld));
    return off;
}


/**
 * Moves a variable to a different register or spills it onto the stack.
 *
 * This must be a stack variable (kIemNativeVarKind_Stack) because the other
 * kinds can easily be recreated if needed later.
 *
 * @returns The new code buffer position.
 * @param   pReNative       The native recompile state.
 * @param   off             The current code buffer position.
 * @param   idxVar          The variable index.
 * @param   fForbiddenRegs  Mask of the forbidden registers.  Defaults to
 *                          call-volatile registers.
 */
DECL_HIDDEN_THROW(uint32_t) iemNativeSimdRegMoveOrSpillStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar,
                                                                uint32_t fForbiddenRegs /*= IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK*/)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
    Assert(pVar->enmKind == kIemNativeVarKind_Stack);
    Assert(!pVar->fRegAcquired);
    Assert(pVar->fSimdReg);

    uint8_t const idxRegOld = pVar->idxReg;
    Assert(idxRegOld < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
    Assert(pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxRegOld));
    Assert(pReNative->Core.aHstSimdRegs[idxRegOld].enmWhat == kIemNativeWhat_Var);
    Assert(   (pReNative->Core.bmGstSimdRegShadows & pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows)
           == pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows);
    Assert(pReNative->Core.bmGstSimdRegShadows < RT_BIT_64(kIemNativeGstReg_End));
    Assert(   RT_BOOL(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxRegOld))
           == RT_BOOL(pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows));
    Assert(!(  (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
             & pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows));

    /** @todo Add statistics on this. */
    /** @todo Implement basic variable liveness analysis (python) so variables
     * can be freed immediately once no longer used.  This has the potential to
     * be trashing registers and stack for dead variables.
     * Update: This is mostly done. (Not IEMNATIVE_WITH_LIVENESS_ANALYSIS.) */

    /*
     * First try to move it to a different register, as that's cheaper.
     */
    fForbiddenRegs |= RT_BIT_32(idxRegOld);
    fForbiddenRegs |= IEMNATIVE_SIMD_REG_FIXED_MASK;
    uint32_t fRegs = ~pReNative->Core.bmHstSimdRegs & ~fForbiddenRegs;
    if (fRegs)
    {
        /* Avoid using shadow registers, if possible. */
        if (fRegs & ~pReNative->Core.bmHstSimdRegsWithGstShadow)
            fRegs &= ~pReNative->Core.bmHstSimdRegsWithGstShadow;
        unsigned const idxRegNew = ASMBitFirstSetU32(fRegs) - 1;
        return iemNativeSimdRegMoveVar(pReNative, off, idxVar, idxRegOld, idxRegNew, "iemNativeSimdRegMoveOrSpillStackVar");
    }

    /*
     * Otherwise we must spill the register onto the stack.
     */
    uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
    Log12(("iemNativeSimdRegMoveOrSpillStackVar: spilling idxVar=%#x/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
           idxVar, idxRegOld, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));

    if (pVar->cbVar == sizeof(RTUINT128U))
        off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
    else
    {
        Assert(pVar->cbVar == sizeof(RTUINT256U));
        off = iemNativeEmitStoreVecRegByBpU256(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
    }

    pVar->idxReg = UINT8_MAX;
    pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
    pReNative->Core.bmHstSimdRegs              &= ~RT_BIT_32(idxRegOld);
    pReNative->Core.bmGstSimdRegShadows        &= ~pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows;
    pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows = 0;
    return off;
}


/**
 * Called right before emitting a call instruction to move anything important
 * out of call-volatile SIMD registers, free and flush the call-volatile SIMD
 * registers, optionally freeing argument variables.
 *
 * @returns New code buffer offset, UINT32_MAX on failure.
 * @param   pReNative   The native recompile state.
 * @param   off         The code buffer offset.
 * @param   cArgs       The number of arguments the function call takes.
 *                      It is presumed that the host register part of these have
 *                      been allocated as such already and won't need moving,
 *                      just freeing.
 * @param   fKeepVars   Mask of variables that should keep their register
 *                      assignments.  Caller must take care to handle these.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeSimdRegMoveAndFreeAndFlushAtCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs, uint32_t fKeepVars /*= 0*/)
{
    Assert(!cArgs); RT_NOREF(cArgs);

    /* fKeepVars will reduce this mask. */
    uint32_t fSimdRegsToFree = IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK;

    /*
     * Move anything important out of volatile registers.
     */
    uint32_t fSimdRegsToMove = IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
#ifdef IEMNATIVE_SIMD_REG_FIXED_TMP0
                             & ~RT_BIT_32(IEMNATIVE_SIMD_REG_FIXED_TMP0)
#endif
                             ;

    fSimdRegsToMove &= pReNative->Core.bmHstSimdRegs;
    if (!fSimdRegsToMove)
    { /* likely */ }
    else
    {
        Log12(("iemNativeSimdRegMoveAndFreeAndFlushAtCall: fSimdRegsToMove=%#x\n", fSimdRegsToMove));
        while (fSimdRegsToMove != 0)
        {
            unsigned const idxSimdReg = ASMBitFirstSetU32(fSimdRegsToMove) - 1;
            fSimdRegsToMove &= ~RT_BIT_32(idxSimdReg);

            switch (pReNative->Core.aHstSimdRegs[idxSimdReg].enmWhat)
            {
                case kIemNativeWhat_Var:
                {
                    uint8_t const       idxVar = pReNative->Core.aHstSimdRegs[idxSimdReg].idxVar;
                    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
                    PIEMNATIVEVAR const pVar   = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
                    Assert(pVar->idxReg == idxSimdReg);
                    Assert(pVar->fSimdReg);
                    if (!(RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)) & fKeepVars))
                    {
                        Log12(("iemNativeSimdRegMoveAndFreeAndFlushAtCall: idxVar=%#x enmKind=%d idxSimdReg=%d\n",
                               idxVar, pVar->enmKind, pVar->idxReg));
                        if (pVar->enmKind != kIemNativeVarKind_Stack)
                            pVar->idxReg = UINT8_MAX;
                        else
                            off = iemNativeSimdRegMoveOrSpillStackVar(pReNative, off, idxVar);
                    }
                    else
                        fSimdRegsToFree &= ~RT_BIT_32(idxSimdReg);
                    continue;
                }

                case kIemNativeWhat_Arg:
                    AssertMsgFailed(("What?!?: %u\n", idxSimdReg));
                    continue;

                case kIemNativeWhat_rc:
                case kIemNativeWhat_Tmp:
                    AssertMsgFailed(("Missing free: %u\n", idxSimdReg));
                    continue;

                case kIemNativeWhat_FixedReserved:
#ifdef RT_ARCH_ARM64
                    continue; /* On ARM the upper half of the virtual 256-bit register. */
#endif

                case kIemNativeWhat_FixedTmp:
                case kIemNativeWhat_pVCpuFixed:
                case kIemNativeWhat_pCtxFixed:
                case kIemNativeWhat_PcShadow:
                case kIemNativeWhat_Invalid:
                case kIemNativeWhat_End:
                    AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_1));
            }
            AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_2));
        }
    }

    /*
     * Do the actual freeing.
     */
    if (pReNative->Core.bmHstSimdRegs & fSimdRegsToFree)
        Log12(("iemNativeSimdRegMoveAndFreeAndFlushAtCall: bmHstSimdRegs %#x -> %#x\n",
               pReNative->Core.bmHstSimdRegs, pReNative->Core.bmHstSimdRegs & ~fSimdRegsToFree));
    pReNative->Core.bmHstSimdRegs &= ~fSimdRegsToFree;

    /* If there are guest register shadows in any call-volatile register, we
       have to clear the corresponding guest register masks for each register. */
    uint32_t fHstSimdRegsWithGstShadow = pReNative->Core.bmHstSimdRegsWithGstShadow & fSimdRegsToFree;
    if (fHstSimdRegsWithGstShadow)
    {
        Log12(("iemNativeSimdRegMoveAndFreeAndFlushAtCall: bmHstSimdRegsWithGstShadow %#RX32 -> %#RX32; removed %#RX32\n",
               pReNative->Core.bmHstSimdRegsWithGstShadow, pReNative->Core.bmHstSimdRegsWithGstShadow & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK,
               fHstSimdRegsWithGstShadow));
        pReNative->Core.bmHstSimdRegsWithGstShadow &= ~fHstSimdRegsWithGstShadow;
        do
        {
            unsigned const idxSimdReg = ASMBitFirstSetU32(fHstSimdRegsWithGstShadow) - 1;
            fHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxSimdReg);

            AssertMsg(pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows != 0, ("idxSimdReg=%#x\n", idxSimdReg));

#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
            /*
             * Flush any pending writes now (might have been skipped earlier in
             * iemEmitCallCommon(), but that doesn't apply to call-volatile registers).
             */
            if (  (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
                & pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows)
                off = iemNativeSimdRegFlushDirtyGuestByHostSimdRegShadow(pReNative, off, idxSimdReg);
#endif
            Assert(!(  (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
                     & pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows));

            pReNative->Core.bmGstSimdRegShadows &= ~pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows;
            pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows = 0;
        } while (fHstSimdRegsWithGstShadow != 0);
    }

    return off;
}
#endif


/**
 * Called right before emitting a call instruction to move anything important
 * out of call-volatile registers, free and flush the call-volatile registers,
 * optionally freeing argument variables.
 *
 * @returns New code buffer offset, UINT32_MAX on failure.
 * @param   pReNative   The native recompile state.
 * @param   off         The code buffer offset.
 * @param   cArgs       The number of arguments the function call takes.
 *                      It is presumed that the host register part of these have
 *                      been allocated as such already and won't need moving,
 *                      just freeing.
 * @param   fKeepVars   Mask of variables that should keep their register
 *                      assignments.  Caller must take care to handle these.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeRegMoveAndFreeAndFlushAtCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs, uint32_t fKeepVars /*= 0*/)
{
    Assert(cArgs <= IEMNATIVE_CALL_MAX_ARG_COUNT);

    /* fKeepVars will reduce this mask. */
    uint32_t fRegsToFree = IEMNATIVE_CALL_VOLATILE_NOTMP_GREG_MASK;

#ifdef RT_ARCH_ARM64
    AssertCompile(IEMNATIVE_CALL_VOLATILE_NOTMP_GREG_MASK == UINT32_C(0x37fff));
#endif

    /*
     * Move anything important out of volatile registers.
     */
    if (cArgs > RT_ELEMENTS(g_aidxIemNativeCallRegs))
        cArgs = RT_ELEMENTS(g_aidxIemNativeCallRegs);
    uint32_t fRegsToMove = IEMNATIVE_CALL_VOLATILE_NOTMP_GREG_MASK
#ifdef IEMNATIVE_REG_FIXED_PC_DBG
                         & ~RT_BIT_32(IEMNATIVE_REG_FIXED_PC_DBG)
#endif
                         & ~g_afIemNativeCallRegs[cArgs];

    fRegsToMove &= pReNative->Core.bmHstRegs;
    if (!fRegsToMove)
    { /* likely */ }
    else
    {
        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: fRegsToMove=%#x\n", fRegsToMove));
        while (fRegsToMove != 0)
        {
            unsigned const idxReg = ASMBitFirstSetU32(fRegsToMove) - 1;
            fRegsToMove &= ~RT_BIT_32(idxReg);

            switch (pReNative->Core.aHstRegs[idxReg].enmWhat)
            {
                case kIemNativeWhat_Var:
                {
                    uint8_t const       idxVar = pReNative->Core.aHstRegs[idxReg].idxVar;
                    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
                    PIEMNATIVEVAR const pVar   = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
                    Assert(pVar->idxReg == idxReg);
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
                    Assert(!pVar->fSimdReg);
#endif
                    if (!(RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)) & fKeepVars))
                    {
                        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: idxVar=%#x enmKind=%d idxReg=%d\n",
                               idxVar, pVar->enmKind, pVar->idxReg));
                        if (pVar->enmKind != kIemNativeVarKind_Stack)
                            pVar->idxReg = UINT8_MAX;
                        else
                            off = iemNativeRegMoveOrSpillStackVar(pReNative, off, idxVar);
                    }
                    else
                        fRegsToFree &= ~RT_BIT_32(idxReg);
                    continue;
                }

                case kIemNativeWhat_Arg:
                    AssertMsgFailed(("What?!?: %u\n", idxReg));
                    continue;

                case kIemNativeWhat_rc:
                case kIemNativeWhat_Tmp:
                    AssertMsgFailed(("Missing free: %u\n", idxReg));
                    continue;

                case kIemNativeWhat_FixedTmp:
                case kIemNativeWhat_pVCpuFixed:
                case kIemNativeWhat_pCtxFixed:
                case kIemNativeWhat_PcShadow:
                case kIemNativeWhat_FixedReserved:
                case kIemNativeWhat_Invalid:
                case kIemNativeWhat_End:
                    AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_1));
            }
            AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_2));
        }
    }

    /*
     * Do the actual freeing.
     */
    if (pReNative->Core.bmHstRegs & fRegsToFree)
        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: bmHstRegs %#x -> %#x\n",
               pReNative->Core.bmHstRegs, pReNative->Core.bmHstRegs & ~fRegsToFree));
    pReNative->Core.bmHstRegs &= ~fRegsToFree;

    /* If there are guest register shadows in any call-volatile register, we
       have to clear the corresponding guest register masks for each register. */
    uint32_t fHstRegsWithGstShadow = pReNative->Core.bmHstRegsWithGstShadow & fRegsToFree;
    if (fHstRegsWithGstShadow)
    {
        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: bmHstRegsWithGstShadow %#RX32 -> %#RX32; removed %#RX32\n",
               pReNative->Core.bmHstRegsWithGstShadow, pReNative->Core.bmHstRegsWithGstShadow & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK,
               fHstRegsWithGstShadow));
        pReNative->Core.bmHstRegsWithGstShadow &= ~fHstRegsWithGstShadow;
        do
        {
            unsigned const idxReg = ASMBitFirstSetU32(fHstRegsWithGstShadow) - 1;
            fHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);

            AssertMsg(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0, ("idxReg=%#x\n", idxReg));

#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
            /*
             * Flush any pending writes now (might have been skipped earlier in
             * iemEmitCallCommon(), but that doesn't apply to call-volatile registers).
             */
            if (pReNative->Core.bmGstRegShadowDirty & pReNative->Core.aHstRegs[idxReg].fGstRegShadows)
                off = iemNativeRegFlushDirtyGuestByHostRegShadow(pReNative, off, idxReg);
            Assert(!(pReNative->Core.bmGstRegShadowDirty & pReNative->Core.aHstRegs[idxReg].fGstRegShadows));
#endif

            pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
            pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
        } while (fHstRegsWithGstShadow != 0);
    }

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    /* Now for the SIMD registers, no argument support for now. */
    off = iemNativeSimdRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /*cArgs*/, fKeepVars);
#endif

    return off;
}
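
/* Illustrative sketch only: the rough order of operations around a helper
   call as described above -- evacuate volatiles, load arguments, emit the
   call.  The helper is hypothetical and the exact ordering in the real emit
   paths (e.g. iemEmitCallCommon()) may differ. */
#if 0
static uint32_t iemNativeExampleCallHelper(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /* 1. Move/spill anything living in call-volatile registers and flush
          their guest shadows. */
    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 1 /*cArgs*/);
    /* 2. Load the argument register(s). */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
    /* 3. Emit the call itself. */
    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeExampleHlp); /* hypothetical helper */
    /* 4. Shadows of guest registers the helper may have changed must be
          dropped afterwards, see iemNativeRegFlushGuestShadows below. */
    return off;
}
#endif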


/**
 * Flushes a set of guest register shadow copies.
 *
 * This is usually done after calling a threaded function or a C-implementation
 * of an instruction.
 *
 * @param   pReNative   The native recompile state.
 * @param   fGstRegs    Set of guest registers to flush.
 */
DECLHIDDEN(void) iemNativeRegFlushGuestShadows(PIEMRECOMPILERSTATE pReNative, uint64_t fGstRegs) RT_NOEXCEPT
{
    /*
     * Reduce the mask by what's currently shadowed.
     */
    uint64_t const bmGstRegShadowsOld = pReNative->Core.bmGstRegShadows;
    fGstRegs &= bmGstRegShadowsOld;
    if (fGstRegs)
    {
        uint64_t const bmGstRegShadowsNew = bmGstRegShadowsOld & ~fGstRegs;
        Log12(("iemNativeRegFlushGuestShadows: flushing %#RX64 (%#RX64 -> %#RX64)\n", fGstRegs, bmGstRegShadowsOld, bmGstRegShadowsNew));
        pReNative->Core.bmGstRegShadows = bmGstRegShadowsNew;
        if (bmGstRegShadowsNew)
        {
            /*
             * Partial.
             */
            do
            {
                unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegs) - 1;
                uint8_t const  idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
                Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aidxGstRegShadows));
                Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg));
                Assert(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg));
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
                Assert(!(pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(idxGstReg)));
#endif

                uint64_t const fInThisHstReg = (pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & fGstRegs) | RT_BIT_64(idxGstReg);
                fGstRegs &= ~fInThisHstReg;
                uint64_t const fGstRegShadowsNew = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & ~fInThisHstReg;
                pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = fGstRegShadowsNew;
                if (!fGstRegShadowsNew)
                    pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
            } while (fGstRegs != 0);
        }
        else
        {
            /*
             * Clear all.
             */
            do
            {
                unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegs) - 1;
                uint8_t const  idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
                Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aidxGstRegShadows));
                Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg));
                Assert(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg));
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
                Assert(!(pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(idxGstReg)));
#endif

                fGstRegs &= ~(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows | RT_BIT_64(idxGstReg));
                pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
            } while (fGstRegs != 0);
            pReNative->Core.bmHstRegsWithGstShadow = 0;
        }
    }
}
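
/* Illustrative sketch only: after calling a C-implementation helper that may
   change RIP and EFLAGS, the now-stale shadow copies are dropped like this.
   The guest register enum members used are assumed from the surrounding code. */
#if 0
static void iemNativeExampleFlushAfterCImpl(PIEMRECOMPILERSTATE pReNative)
{
    iemNativeRegFlushGuestShadows(pReNative,
                                  RT_BIT_64(kIemNativeGstReg_Pc) | RT_BIT_64(kIemNativeGstReg_EFlags));
}
#endif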


/**
 * Flushes guest register shadow copies held by a set of host registers.
 *
 * This is used with the TLB lookup code for ensuring that we don't carry on
 * with any guest shadows in volatile registers, as these will get corrupted by
 * a TLB miss.
 *
 * @param   pReNative   The native recompile state.
 * @param   fHstRegs    Set of host registers to flush guest shadows for.
 */
DECLHIDDEN(void) iemNativeRegFlushGuestShadowsByHostMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegs) RT_NOEXCEPT
{
    /*
     * Reduce the mask by what's currently shadowed.
     */
    uint32_t const bmHstRegsWithGstShadowOld = pReNative->Core.bmHstRegsWithGstShadow;
    fHstRegs &= bmHstRegsWithGstShadowOld;
    if (fHstRegs)
    {
        uint32_t const bmHstRegsWithGstShadowNew = bmHstRegsWithGstShadowOld & ~fHstRegs;
        Log12(("iemNativeRegFlushGuestShadowsByHostMask: flushing %#RX32 (%#RX32 -> %#RX32)\n",
               fHstRegs, bmHstRegsWithGstShadowOld, bmHstRegsWithGstShadowNew));
        pReNative->Core.bmHstRegsWithGstShadow = bmHstRegsWithGstShadowNew;
        if (bmHstRegsWithGstShadowNew)
        {
            /*
             * Partial (likely).
             */
            uint64_t fGstShadows = 0;
            do
            {
                unsigned const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
                Assert(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg)));
                Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows)
                       == pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows);
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
                Assert(!(pReNative->Core.bmGstRegShadowDirty & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));
#endif

                fGstShadows |= pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
                pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
                fHstRegs &= ~RT_BIT_32(idxHstReg);
            } while (fHstRegs != 0);
            pReNative->Core.bmGstRegShadows &= ~fGstShadows;
        }
        else
        {
            /*
             * Clear all.
             */
            do
            {
                unsigned const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
                Assert(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg)));
                Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows)
                       == pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows);
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
                Assert(!(pReNative->Core.bmGstRegShadowDirty & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));
#endif

                pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
                fHstRegs &= ~RT_BIT_32(idxHstReg);
            } while (fHstRegs != 0);
            pReNative->Core.bmGstRegShadows = 0;
        }
    }
}
|
---|
4685 |
|
---|
4686 |
|
---|
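/* Example (illustrative sketch, not from the upstream sources): ahead of a
   code path that may clobber the call-volatile registers, e.g. a TLB miss
   helper, the volatile host registers can be stripped of guest shadows:

       iemNativeRegFlushGuestShadowsByHostMask(pReNative, IEMNATIVE_CALL_VOLATILE_GREG_MASK);
*/
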
/**
 * Restores guest shadow copies in volatile registers.
 *
 * This is used after calling a helper function (think TLB miss) to restore the
 * register state of volatile registers.
 *
 * @param   pReNative               The native recompile state.
 * @param   off                     The code buffer offset.
 * @param   fHstRegsActiveShadows   Set of host registers which are allowed to
 *                                  be active (allocated) w/o asserting. Hack.
 * @see     iemNativeVarSaveVolatileRegsPreHlpCall(),
 *          iemNativeVarRestoreVolatileRegsPostHlpCall()
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeRegRestoreGuestShadowsInVolatileRegs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fHstRegsActiveShadows)
{
    uint32_t fHstRegs = pReNative->Core.bmHstRegsWithGstShadow & IEMNATIVE_CALL_VOLATILE_GREG_MASK;
    if (fHstRegs)
    {
        Log12(("iemNativeRegRestoreGuestShadowsInVolatileRegs: %#RX32\n", fHstRegs));
        do
        {
            unsigned const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;

            /* It's not fatal if a register is active holding a variable that
               shadows a guest register, ASSUMING all pending guest register
               writes were flushed prior to the helper call.  However, we'll be
               emitting duplicate restores, so it wastes code space. */
            Assert(!(pReNative->Core.bmHstRegs & ~fHstRegsActiveShadows & RT_BIT_32(idxHstReg)));
            RT_NOREF(fHstRegsActiveShadows);

            uint64_t const fGstRegShadows = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
            Assert(!(pReNative->Core.bmGstRegShadowDirty & fGstRegShadows));
#endif
            Assert((pReNative->Core.bmGstRegShadows & fGstRegShadows) == fGstRegShadows);
            AssertStmt(fGstRegShadows != 0 && fGstRegShadows < RT_BIT_64(kIemNativeGstReg_End),
                       IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_12));

            unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
            off = iemNativeEmitLoadGprWithGstShadowReg(pReNative, off, idxHstReg, (IEMNATIVEGSTREG)idxGstReg);

            fHstRegs &= ~RT_BIT_32(idxHstReg);
        } while (fHstRegs != 0);
    }
    return off;
}

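/* Example (illustrative sketch, not from the upstream sources): after a
   helper call that clobbers the volatile registers but leaves the shadow
   bookkeeping intact, the guest values are reloaded like this:

       ... emit the helper call ...
       off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, 0 /*fHstRegsActiveShadows*/);
*/
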
/*********************************************************************************************************************************
*   SIMD register allocator (largely code duplication of the GPR allocator for now but might diverge)                           *
*********************************************************************************************************************************/
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR

/**
 * Info about shadowed guest SIMD register values.
 * @see IEMNATIVEGSTSIMDREG
 */
static struct
{
    /** Offset in VMCPU of XMM (low 128-bit) registers. */
    uint32_t    offXmm;
    /** Offset in VMCPU of YmmHi (high 128-bit) registers. */
    uint32_t    offYmm;
    /** Name (for logging). */
    const char *pszName;
} const g_aGstSimdShadowInfo[] =
{
#define CPUMCTX_OFF_AND_SIZE(a_iSimdReg) (uint32_t)RT_UOFFSETOF(VMCPU, cpum.GstCtx.XState.x87.aXMM[a_iSimdReg]), \
                                         (uint32_t)RT_UOFFSETOF(VMCPU, cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iSimdReg])
    /* [kIemNativeGstSimdReg_SimdRegFirst +  0] = */ { CPUMCTX_OFF_AND_SIZE(0),  "ymm0",  },
    /* [kIemNativeGstSimdReg_SimdRegFirst +  1] = */ { CPUMCTX_OFF_AND_SIZE(1),  "ymm1",  },
    /* [kIemNativeGstSimdReg_SimdRegFirst +  2] = */ { CPUMCTX_OFF_AND_SIZE(2),  "ymm2",  },
    /* [kIemNativeGstSimdReg_SimdRegFirst +  3] = */ { CPUMCTX_OFF_AND_SIZE(3),  "ymm3",  },
    /* [kIemNativeGstSimdReg_SimdRegFirst +  4] = */ { CPUMCTX_OFF_AND_SIZE(4),  "ymm4",  },
    /* [kIemNativeGstSimdReg_SimdRegFirst +  5] = */ { CPUMCTX_OFF_AND_SIZE(5),  "ymm5",  },
    /* [kIemNativeGstSimdReg_SimdRegFirst +  6] = */ { CPUMCTX_OFF_AND_SIZE(6),  "ymm6",  },
    /* [kIemNativeGstSimdReg_SimdRegFirst +  7] = */ { CPUMCTX_OFF_AND_SIZE(7),  "ymm7",  },
    /* [kIemNativeGstSimdReg_SimdRegFirst +  8] = */ { CPUMCTX_OFF_AND_SIZE(8),  "ymm8",  },
    /* [kIemNativeGstSimdReg_SimdRegFirst +  9] = */ { CPUMCTX_OFF_AND_SIZE(9),  "ymm9",  },
    /* [kIemNativeGstSimdReg_SimdRegFirst + 10] = */ { CPUMCTX_OFF_AND_SIZE(10), "ymm10", },
    /* [kIemNativeGstSimdReg_SimdRegFirst + 11] = */ { CPUMCTX_OFF_AND_SIZE(11), "ymm11", },
    /* [kIemNativeGstSimdReg_SimdRegFirst + 12] = */ { CPUMCTX_OFF_AND_SIZE(12), "ymm12", },
    /* [kIemNativeGstSimdReg_SimdRegFirst + 13] = */ { CPUMCTX_OFF_AND_SIZE(13), "ymm13", },
    /* [kIemNativeGstSimdReg_SimdRegFirst + 14] = */ { CPUMCTX_OFF_AND_SIZE(14), "ymm14", },
    /* [kIemNativeGstSimdReg_SimdRegFirst + 15] = */ { CPUMCTX_OFF_AND_SIZE(15), "ymm15", },
#undef CPUMCTX_OFF_AND_SIZE
};
AssertCompile(RT_ELEMENTS(g_aGstSimdShadowInfo) == kIemNativeGstSimdReg_End);

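/* Example (illustrative): the emitters below index this table by guest SIMD
   register, e.g. storing the low half of ymm3 to the CPU context uses
   g_aGstSimdShadowInfo[kIemNativeGstSimdReg_SimdRegFirst + 3].offXmm, which
   resolves to the VMCPU offset of cpum.GstCtx.XState.x87.aXMM[3]. */
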
/**
 * Frees a temporary SIMD register.
 *
 * Any shadow copies of guest registers assigned to the host register will not
 * be flushed by this operation.
 */
DECLHIDDEN(void) iemNativeSimdRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstSimdReg) RT_NOEXCEPT
{
    Assert(pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxHstSimdReg));
    Assert(pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmWhat == kIemNativeWhat_Tmp);
    pReNative->Core.bmHstSimdRegs &= ~RT_BIT_32(idxHstSimdReg);
    Log12(("iemNativeSimdRegFreeTmp: %s (gst: %#RX64)\n",
           g_apszIemNativeHstSimdRegNames[idxHstSimdReg], pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows));
}

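/* Example (illustrative sketch): the typical pairing with the temporary
   allocator further down in this file:

       uint8_t const idxTmpSimdReg = iemNativeSimdRegAllocTmp(pReNative, &off);
       ... emit code using idxTmpSimdReg ...
       iemNativeSimdRegFreeTmp(pReNative, idxTmpSimdReg);
*/
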
/**
 * Emits code to flush a pending write of the given SIMD register if any, also
 * flushes the guest to host SIMD register association.
 *
 * @returns New code buffer offset.
 * @param   pReNative       The native recompile state.
 * @param   off             Current code buffer position.
 * @param   enmGstSimdReg   The guest SIMD register to flush.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeSimdRegFlushPendingWrite(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEGSTSIMDREG enmGstSimdReg)
{
    uint8_t const idxHstSimdReg = pReNative->Core.aidxGstSimdRegShadows[enmGstSimdReg];

    Log12(("iemNativeSimdRegFlushPendingWrite: Clearing guest register %s shadowed by host %s with state DirtyLo:%u DirtyHi:%u\n",
           g_aGstSimdShadowInfo[enmGstSimdReg].pszName, g_apszIemNativeHstSimdRegNames[idxHstSimdReg],
           IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_LO_U128(pReNative, enmGstSimdReg),
           IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_HI_U128(pReNative, enmGstSimdReg)));

    if (IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_LO_U128(pReNative, enmGstSimdReg))
    {
        Assert(   pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_256
               || pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_Low128);
        off = iemNativeEmitSimdStoreVecRegToVCpuLowU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offXmm);
    }

    if (IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_HI_U128(pReNative, enmGstSimdReg))
    {
        Assert(   pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_256
               || pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_High128);
        off = iemNativeEmitSimdStoreVecRegToVCpuHighU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offYmm);
    }

    IEMNATIVE_SIMD_REG_STATE_CLR_DIRTY(pReNative, enmGstSimdReg);
    return off;
}

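/* Example (illustrative): flushing a single guest SIMD register, e.g. ymm0:

       off = iemNativeSimdRegFlushPendingWrite(pReNative, off, IEMNATIVEGSTSIMDREG_SIMD(0));
*/
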
/**
 * Flush the given set of guest SIMD registers if marked as dirty.
 *
 * @returns New code buffer offset.
 * @param   pReNative           The native recompile state.
 * @param   off                 Current code buffer position.
 * @param   fFlushGstSimdReg    The guest SIMD register set to flush (default is flush everything).
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeSimdRegFlushDirtyGuest(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t fFlushGstSimdReg /*= UINT64_MAX*/)
{
    uint64_t bmGstSimdRegShadowDirty = (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
                                     & fFlushGstSimdReg;
    if (bmGstSimdRegShadowDirty)
    {
# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
        iemNativeDbgInfoAddNativeOffset(pReNative, off);
        iemNativeDbgInfoAddGuestRegWriteback(pReNative, true /*fSimdReg*/, bmGstSimdRegShadowDirty);
# endif

        do
        {
            unsigned const idxGstSimdReg = ASMBitFirstSetU64(bmGstSimdRegShadowDirty) - 1;
            bmGstSimdRegShadowDirty &= ~RT_BIT_64(idxGstSimdReg);
            off = iemNativeSimdRegFlushPendingWrite(pReNative, off, IEMNATIVEGSTSIMDREG_SIMD(idxGstSimdReg));
        } while (bmGstSimdRegShadowDirty);
    }

    return off;
}

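/* Example (illustrative): flushing all dirty guest SIMD registers before a
   helper call (fFlushGstSimdReg defaults to UINT64_MAX in the declaration):

       off = iemNativeSimdRegFlushDirtyGuest(pReNative, off);
*/
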
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
/**
 * Flush all shadowed guest SIMD registers marked as dirty for the given host SIMD register.
 *
 * @returns New code buffer offset.
 * @param   pReNative       The native recompile state.
 * @param   off             Current code buffer position.
 * @param   idxHstSimdReg   The host SIMD register.
 *
 * @note This doesn't do any unshadowing of guest registers from the host register.
 */
DECL_HIDDEN_THROW(uint32_t) iemNativeSimdRegFlushDirtyGuestByHostSimdRegShadow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t const idxHstSimdReg)
{
    /* We need to flush any pending guest register writes this host register shadows. */
    uint64_t bmGstSimdRegShadowDirty = (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
                                     & pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows;
    if (bmGstSimdRegShadowDirty)
    {
# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
        iemNativeDbgInfoAddNativeOffset(pReNative, off);
        iemNativeDbgInfoAddGuestRegWriteback(pReNative, true /*fSimdReg*/, bmGstSimdRegShadowDirty);
# endif

        do
        {
            unsigned const idxGstSimdReg = ASMBitFirstSetU64(bmGstSimdRegShadowDirty) - 1;
            bmGstSimdRegShadowDirty &= ~RT_BIT_64(idxGstSimdReg);
            off = iemNativeSimdRegFlushPendingWrite(pReNative, off, IEMNATIVEGSTSIMDREG_SIMD(idxGstSimdReg));
            Assert(!IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_U256(pReNative, idxGstSimdReg));
        } while (bmGstSimdRegShadowDirty);
    }

    return off;
}
#endif

/**
 * Locates a register, possibly freeing one up.
 *
 * This ASSUMES the caller has done the minimal/optimal allocation checks and
 * failed.
 *
 * @returns Host register number on success.  Returns UINT8_MAX if no registers
 *          found, the caller is supposed to deal with this and raise an
 *          allocation type specific status code (if desired).
 *
 * @throws  VBox status code if we run into trouble spilling a variable or
 *          recording debug info.  Does NOT throw anything if we're out of
 *          registers, though.
 */
static uint8_t iemNativeSimdRegAllocFindFree(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile,
                                             uint32_t fRegMask = IEMNATIVE_HST_SIMD_REG_MASK & ~IEMNATIVE_SIMD_REG_FIXED_MASK)
{
    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeSimdRegFindFree);
    Assert(!(fRegMask & ~IEMNATIVE_HST_SIMD_REG_MASK));
    Assert(!(fRegMask & IEMNATIVE_SIMD_REG_FIXED_MASK));

    /*
     * Try a freed register that's shadowing a guest register.
     */
    uint32_t fRegs = ~pReNative->Core.bmHstSimdRegs & fRegMask;
    if (fRegs)
    {
        STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar);

#if 0 /** @todo def IEMNATIVE_WITH_LIVENESS_ANALYSIS */
        /*
         * When we have liveness information, we use it to kick out all shadowed
         * guest registers that will not be needed any more in this TB.  If we're
         * lucky, this may prevent us from ending up here again.
         *
         * Note! We must consider the previous entry here so we don't free
         *       anything that the current threaded function requires (current
         *       entry is produced by the next threaded function).
         */
        uint32_t const idxCurCall = pReNative->idxCurCall;
        if (idxCurCall > 0)
        {
            PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[idxCurCall - 1];

# ifndef IEMLIVENESS_EXTENDED_LAYOUT
            /* Construct a mask of the guest registers in the UNUSED and XCPT_OR_CALL state. */
            AssertCompile(IEMLIVENESS_STATE_UNUSED == 1 && IEMLIVENESS_STATE_XCPT_OR_CALL == 2);
            uint64_t fToFreeMask = pLivenessEntry->Bit0.bm64 ^ pLivenessEntry->Bit1.bm64; /* mask of regs in either UNUSED or XCPT_OR_CALL state. */
# else
            /* Construct a mask of the registers not in the read or write state.
               Note! We could skip writes, if they aren't from us, as this is just
                     a hack to prevent trashing registers that have just been written
                     or will be written when we retire the current instruction. */
            uint64_t fToFreeMask = ~pLivenessEntry->aBits[IEMLIVENESS_BIT_READ].bm64
                                 & ~pLivenessEntry->aBits[IEMLIVENESS_BIT_WRITE].bm64
                                 & IEMLIVENESSBIT_MASK;
# endif
            /* If it matches any shadowed registers. */
            if (pReNative->Core.bmGstRegShadows & fToFreeMask)
            {
                STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed);
                iemNativeRegFlushGuestShadows(pReNative, fToFreeMask);
                Assert(fRegs == (~pReNative->Core.bmHstRegs & fRegMask)); /* this shall not change. */

                /* See if we've got any unshadowed registers we can return now. */
                uint32_t const fUnshadowedRegs = fRegs & ~pReNative->Core.bmHstRegsWithGstShadow;
                if (fUnshadowedRegs)
                {
                    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped);
                    return (fPreferVolatile
                            ? ASMBitFirstSetU32(fUnshadowedRegs)
                            : ASMBitLastSetU32(  fUnshadowedRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
                                               ? fUnshadowedRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fUnshadowedRegs))
                         - 1;
                }
            }
        }
#endif /* IEMNATIVE_WITH_LIVENESS_ANALYSIS */

        unsigned const idxReg = (fPreferVolatile
                                 ? ASMBitFirstSetU32(fRegs)
                                 : ASMBitLastSetU32(  fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
                                                    ? fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs))
                              - 1;

        Assert(pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows != 0);
        Assert(   (pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows & pReNative->Core.bmGstSimdRegShadows)
               == pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows);
        Assert(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxReg));

        /* We need to flush any pending guest register writes this host SIMD register shadows. */
        *poff = iemNativeSimdRegFlushDirtyGuestByHostSimdRegShadow(pReNative, *poff, idxReg);

        pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxReg);
        pReNative->Core.bmGstSimdRegShadows        &= ~pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows;
        pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows = 0;
        pReNative->Core.aHstSimdRegs[idxReg].enmLoaded      = kIemNativeGstSimdRegLdStSz_Invalid;
        return idxReg;
    }

    AssertFailed(); /** @todo The following needs testing when it actually gets hit. */

    /*
     * Try to free up a variable that's in a register.
     *
     * We do two rounds here, first evacuating variables that don't need to be
     * saved on the stack, then in the second round moving things to the stack.
     */
    STAM_REL_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeSimdRegFindFreeVar);
    for (uint32_t iLoop = 0; iLoop < 2; iLoop++)
    {
        uint32_t fVars = pReNative->Core.bmVars;
        while (fVars)
        {
            uint32_t const idxVar = ASMBitFirstSetU32(fVars) - 1;
            uint8_t const  idxReg = pReNative->Core.aVars[idxVar].idxReg;
            if (!pReNative->Core.aVars[idxVar].fSimdReg) /* Ignore non SIMD variables here. */
            {
                fVars &= ~RT_BIT_32(idxVar); /* Clear the bit before continuing, or we'd rescan it forever. */
                continue;
            }

            if (   idxReg < RT_ELEMENTS(pReNative->Core.aHstSimdRegs)
                && (RT_BIT_32(idxReg) & fRegMask)
                && (  iLoop == 0
                    ? pReNative->Core.aVars[idxVar].enmKind != kIemNativeVarKind_Stack
                    : pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Stack)
                && !pReNative->Core.aVars[idxVar].fRegAcquired)
            {
                Assert(pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxReg));
                Assert(   (pReNative->Core.bmGstSimdRegShadows & pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows)
                       == pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows);
                Assert(pReNative->Core.bmGstSimdRegShadows < RT_BIT_64(kIemNativeGstSimdReg_End));
                Assert(   RT_BOOL(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxReg))
                       == RT_BOOL(pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows));

                if (pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Stack)
                {
                    uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, IEMNATIVE_VAR_IDX_PACK(idxVar));
                    *poff = iemNativeEmitStoreGprByBp(pReNative, *poff, iemNativeStackCalcBpDisp(idxStackSlot), idxReg);
                }

                pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
                pReNative->Core.bmHstSimdRegs &= ~RT_BIT_32(idxReg);

                pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxReg);
                pReNative->Core.bmGstSimdRegShadows        &= ~pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows;
                pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows = 0;
                return idxReg;
            }
            fVars &= ~RT_BIT_32(idxVar);
        }
    }

    AssertFailed();
    return UINT8_MAX;
}

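/* Worked example (illustrative): with fPreferVolatile == false and
   fRegs == 0x18 (candidates 3 and 4), where register 4 is call-volatile, the
   selection expression above first narrows fRegs to the non-volatile subset
   0x08 and then takes ASMBitLastSetU32(0x08) - 1 = 3, i.e. it prefers the
   highest non-volatile candidate and only falls back to volatile ones when
   the non-volatile subset is empty. */
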
/**
 * Flushes a set of guest SIMD register shadow copies.
 *
 * This is usually done after calling a threaded function or a C-implementation
 * of an instruction.
 *
 * @param   pReNative       The native recompile state.
 * @param   fGstSimdRegs    Set of guest SIMD registers to flush.
 */
DECLHIDDEN(void) iemNativeSimdRegFlushGuestShadows(PIEMRECOMPILERSTATE pReNative, uint64_t fGstSimdRegs) RT_NOEXCEPT
{
    /*
     * Reduce the mask by what's currently shadowed.
     */
    uint64_t const bmGstSimdRegShadows = pReNative->Core.bmGstSimdRegShadows;
    fGstSimdRegs &= bmGstSimdRegShadows;
    if (fGstSimdRegs)
    {
        uint64_t const bmGstSimdRegShadowsNew = bmGstSimdRegShadows & ~fGstSimdRegs;
        Log12(("iemNativeSimdRegFlushGuestShadows: flushing %#RX64 (%#RX64 -> %#RX64)\n", fGstSimdRegs, bmGstSimdRegShadows, bmGstSimdRegShadowsNew));
        pReNative->Core.bmGstSimdRegShadows = bmGstSimdRegShadowsNew;
        if (bmGstSimdRegShadowsNew)
        {
            /*
             * Partial.
             */
            do
            {
                unsigned const idxGstReg = ASMBitFirstSetU64(fGstSimdRegs) - 1;
                uint8_t const  idxHstReg = pReNative->Core.aidxGstSimdRegShadows[idxGstReg];
                Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aidxGstSimdRegShadows));
                Assert(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxHstReg));
                Assert(pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg));
                Assert(!IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_U256(pReNative, idxGstReg));

                uint64_t const fInThisHstReg = (pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows & fGstSimdRegs) | RT_BIT_64(idxGstReg);
                fGstSimdRegs &= ~fInThisHstReg;
                uint64_t const fGstRegShadowsNew = pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows & ~fInThisHstReg;
                pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows = fGstRegShadowsNew;
                if (!fGstRegShadowsNew)
                {
                    pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
                    pReNative->Core.aHstSimdRegs[idxHstReg].enmLoaded = kIemNativeGstSimdRegLdStSz_Invalid;
                }
            } while (fGstSimdRegs != 0);
        }
        else
        {
            /*
             * Clear all.
             */
            do
            {
                unsigned const idxGstReg = ASMBitFirstSetU64(fGstSimdRegs) - 1;
                uint8_t const  idxHstReg = pReNative->Core.aidxGstSimdRegShadows[idxGstReg];
                Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aidxGstSimdRegShadows));
                Assert(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxHstReg));
                Assert(pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg));
                Assert(!IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_U256(pReNative, idxGstReg));

                fGstSimdRegs &= ~(pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows | RT_BIT_64(idxGstReg));
                pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows = 0;
                pReNative->Core.aHstSimdRegs[idxHstReg].enmLoaded      = kIemNativeGstSimdRegLdStSz_Invalid;
            } while (fGstSimdRegs != 0);
            pReNative->Core.bmHstSimdRegsWithGstShadow = 0;
        }
    }
}

/**
 * Allocates a temporary host SIMD register.
 *
 * This may emit code to save register content onto the stack in order to free
 * up a register.
 *
 * @returns The host register number; throws VBox status code on failure,
 *          so no need to check the return value.
 * @param   pReNative       The native recompile state.
 * @param   poff            Pointer to the variable with the code buffer position.
 *                          This will be updated if we need to move a variable from
 *                          register to stack in order to satisfy the request.
 * @param   fPreferVolatile Whether to prefer volatile over non-volatile
 *                          registers (@c true, default) or the other way around
 *                          (@c false, for iemNativeRegAllocTmpForGuestReg()).
 */
DECL_HIDDEN_THROW(uint8_t) iemNativeSimdRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile /*= true*/)
{
    /*
     * Try find a completely unused register, preferably a call-volatile one.
     */
    uint8_t  idxSimdReg;
    uint32_t fRegs = ~pReNative->Core.bmHstSimdRegs
                   & ~pReNative->Core.bmHstSimdRegsWithGstShadow
                   & (~IEMNATIVE_SIMD_REG_FIXED_MASK & IEMNATIVE_HST_SIMD_REG_MASK);
    if (fRegs)
    {
        if (fPreferVolatile)
            idxSimdReg = (uint8_t)ASMBitFirstSetU32(  fRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
                                                    ? fRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs) - 1;
        else
            idxSimdReg = (uint8_t)ASMBitFirstSetU32(  fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
                                                    ? fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs) - 1;
        Assert(pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows == 0);
        Assert(!(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxSimdReg)));

        pReNative->Core.aHstSimdRegs[idxSimdReg].enmLoaded = kIemNativeGstSimdRegLdStSz_Invalid;
        Log12(("iemNativeSimdRegAllocTmp: %s\n", g_apszIemNativeHstSimdRegNames[idxSimdReg]));
    }
    else
    {
        idxSimdReg = iemNativeSimdRegAllocFindFree(pReNative, poff, fPreferVolatile);
        AssertStmt(idxSimdReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP));
        Log12(("iemNativeSimdRegAllocTmp: %s (slow)\n", g_apszIemNativeHstSimdRegNames[idxSimdReg]));
    }

    Assert(pReNative->Core.aHstSimdRegs[idxSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_Invalid);
    return iemNativeSimdRegMarkAllocated(pReNative, idxSimdReg, kIemNativeWhat_Tmp);
}

/**
 * Alternative version of iemNativeSimdRegAllocTmp that takes a mask with the
 * acceptable registers.
 *
 * @returns The host register number; throws VBox status code on failure,
 *          so no need to check the return value.
 * @param   pReNative       The native recompile state.
 * @param   poff            Pointer to the variable with the code buffer position.
 *                          This will be updated if we need to move a variable from
 *                          register to stack in order to satisfy the request.
 * @param   fRegMask        Mask of acceptable registers.
 * @param   fPreferVolatile Whether to prefer volatile over non-volatile
 *                          registers (@c true, default) or the other way around
 *                          (@c false, for iemNativeRegAllocTmpForGuestReg()).
 */
DECL_HIDDEN_THROW(uint8_t) iemNativeSimdRegAllocTmpEx(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint32_t fRegMask,
                                                      bool fPreferVolatile /*= true*/)
{
    Assert(!(fRegMask & ~IEMNATIVE_HST_SIMD_REG_MASK));
    Assert(!(fRegMask & IEMNATIVE_SIMD_REG_FIXED_MASK));

    /*
     * Try find a completely unused register, preferably a call-volatile one.
     */
    uint8_t  idxSimdReg;
    uint32_t fRegs = ~pReNative->Core.bmHstSimdRegs
                   & ~pReNative->Core.bmHstSimdRegsWithGstShadow
                   & (~IEMNATIVE_SIMD_REG_FIXED_MASK & IEMNATIVE_HST_SIMD_REG_MASK)
                   & fRegMask;
    if (fRegs)
    {
        if (fPreferVolatile)
            idxSimdReg = (uint8_t)ASMBitFirstSetU32(  fRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
                                                    ? fRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs) - 1;
        else
            idxSimdReg = (uint8_t)ASMBitFirstSetU32(  fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
                                                    ? fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs) - 1;
        Assert(pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows == 0);
        Assert(!(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxSimdReg)));

        pReNative->Core.aHstSimdRegs[idxSimdReg].enmLoaded = kIemNativeGstSimdRegLdStSz_Invalid;
        Log12(("iemNativeSimdRegAllocTmpEx: %s\n", g_apszIemNativeHstSimdRegNames[idxSimdReg]));
    }
    else
    {
        idxSimdReg = iemNativeSimdRegAllocFindFree(pReNative, poff, fPreferVolatile, fRegMask);
        AssertStmt(idxSimdReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP));
        Log12(("iemNativeSimdRegAllocTmpEx: %s (slow)\n", g_apszIemNativeHstSimdRegNames[idxSimdReg]));
    }

    Assert(pReNative->Core.aHstSimdRegs[idxSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_Invalid);
    return iemNativeSimdRegMarkAllocated(pReNative, idxSimdReg, kIemNativeWhat_Tmp);
}

/**
 * Sets the indicator for which part of the given SIMD register has valid data loaded.
 *
 * @param   pReNative       The native recompile state.
 * @param   idxHstSimdReg   The host SIMD register to update the state for.
 * @param   enmLoadSz       The load size to set.
 */
DECL_FORCE_INLINE(void) iemNativeSimdRegSetValidLoadFlag(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstSimdReg,
                                                         IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz)
{
    /* Everything valid already? -> nothing to do. */
    if (pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_256)
        return;

    if (pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_Invalid)
        pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded = enmLoadSz;
    else if (pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded != enmLoadSz)
    {
        Assert(   (   pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_Low128
                   && enmLoadSz == kIemNativeGstSimdRegLdStSz_High128)
               || (   pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_High128
                   && enmLoadSz == kIemNativeGstSimdRegLdStSz_Low128));
        pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded = kIemNativeGstSimdRegLdStSz_256;
    }
}

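/* State transition example (illustrative): a register that currently has only
   the low 128 bits valid (kIemNativeGstSimdRegLdStSz_Low128) and then gets
   the high half loaded (enmLoadSz == kIemNativeGstSimdRegLdStSz_High128)
   ends up in the kIemNativeGstSimdRegLdStSz_256 state; any other mixed
   combination trips the assertion above. */
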
static uint32_t iemNativeSimdRegAllocLoadVecRegFromVecRegSz(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEGSTSIMDREG enmGstSimdRegDst,
                                                            uint8_t idxHstSimdRegDst, uint8_t idxHstSimdRegSrc, IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSzDst)
{
    /* Easy case first: either the destination loads the same range as what the
       source has already loaded, or the source has loaded everything. */
    if (   pReNative->Core.aHstSimdRegs[idxHstSimdRegSrc].enmLoaded == enmLoadSzDst
        || pReNative->Core.aHstSimdRegs[idxHstSimdRegSrc].enmLoaded == kIemNativeGstSimdRegLdStSz_256)
    {
# ifdef RT_ARCH_ARM64
        /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
        Assert(!(idxHstSimdRegDst & 0x1)); Assert(!(idxHstSimdRegSrc & 0x1));
# endif

        if (idxHstSimdRegDst != idxHstSimdRegSrc)
        {
            switch (enmLoadSzDst)
            {
                case kIemNativeGstSimdRegLdStSz_256:
                    off = iemNativeEmitSimdLoadVecRegFromVecRegU256(pReNative, off, idxHstSimdRegDst, idxHstSimdRegSrc);
                    break;
                case kIemNativeGstSimdRegLdStSz_Low128:
                    off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, idxHstSimdRegDst, idxHstSimdRegSrc);
                    break;
                case kIemNativeGstSimdRegLdStSz_High128:
                    off = iemNativeEmitSimdLoadVecRegHighU128FromVecRegHighU128(pReNative, off, idxHstSimdRegDst, idxHstSimdRegSrc);
                    break;
                default:
                    AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IPE_NOT_REACHED_DEFAULT_CASE));
            }

            iemNativeSimdRegSetValidLoadFlag(pReNative, idxHstSimdRegDst, enmLoadSzDst);
        }
    }
    else
    {
        /* The source doesn't have the part loaded, so load the register from CPUMCTX. */
        Assert(enmLoadSzDst == kIemNativeGstSimdRegLdStSz_Low128 || enmLoadSzDst == kIemNativeGstSimdRegLdStSz_High128);
        off = iemNativeEmitLoadSimdRegWithGstShadowSimdReg(pReNative, off, idxHstSimdRegDst, enmGstSimdRegDst, enmLoadSzDst);
    }

    return off;
}

/**
 * Allocates a temporary host SIMD register for keeping a guest
 * SIMD register value.
 *
 * Since we may already have a register holding the guest register value,
 * code will be emitted to do the loading if that's not the case.  Code may also
 * be emitted if we have to free up a register to satisfy the request.
 *
 * @returns The host register number; throws VBox status code on failure, so no
 *          need to check the return value.
 * @param   pReNative       The native recompile state.
 * @param   poff            Pointer to the variable with the code buffer
 *                          position.  This will be updated if we need to move a
 *                          variable from register to stack in order to satisfy
 *                          the request.
 * @param   enmGstSimdReg   The guest SIMD register that is to be updated.
 * @param   enmLoadSz       Which part of the register needs to be valid
 *                          (low/high 128 bits or the full 256 bits).
 * @param   enmIntendedUse  How the caller will be using the host register.
 * @param   fNoVolatileRegs Set if no volatile register allowed, clear if any
 *                          register is okay (default).  The ASSUMPTION here is
 *                          that the caller has already flushed all volatile
 *                          registers, so this is only applied if we allocate a
 *                          new register.
 * @sa      iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
 */
DECL_HIDDEN_THROW(uint8_t)
iemNativeSimdRegAllocTmpForGuestSimdReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTSIMDREG enmGstSimdReg,
                                        IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz, IEMNATIVEGSTREGUSE enmIntendedUse /*= kIemNativeGstRegUse_ReadOnly*/,
                                        bool fNoVolatileRegs /*= false*/)
{
    Assert(enmGstSimdReg < kIemNativeGstSimdReg_End);
#if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) && 0 /** @todo r=aeichner */
    AssertMsg(   pReNative->idxCurCall == 0
              || (enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
                  ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstSimdReg))
                  : enmIntendedUse == kIemNativeGstRegUse_ForUpdate
                  ? IEMLIVENESS_STATE_IS_MODIFY_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstSimdReg))
                  : IEMLIVENESS_STATE_IS_INPUT_EXPECTED(  iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstSimdReg)) ),
              ("%s - %u\n", g_aGstSimdShadowInfo[enmGstSimdReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstSimdReg)));
#endif
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
    static const char * const s_pszIntendedUse[] = { "fetch", "update", "full write", "destructive calc" };
#endif
    uint32_t const fRegMask = !fNoVolatileRegs
                            ? IEMNATIVE_HST_SIMD_REG_MASK & ~IEMNATIVE_SIMD_REG_FIXED_MASK
                            : IEMNATIVE_HST_SIMD_REG_MASK & ~IEMNATIVE_SIMD_REG_FIXED_MASK & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK;

    /*
     * First check if the guest register value is already in a host register.
     */
    if (pReNative->Core.bmGstSimdRegShadows & RT_BIT_64(enmGstSimdReg))
    {
        uint8_t idxSimdReg = pReNative->Core.aidxGstSimdRegShadows[enmGstSimdReg];
        Assert(idxSimdReg < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
        Assert(pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows & RT_BIT_64(enmGstSimdReg));
        Assert(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxSimdReg));

        /* It's not supposed to be allocated... */
        if (!(pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxSimdReg)))
        {
            /*
             * If the register will trash the guest shadow copy, try find a
             * completely unused register we can use instead.  If that fails,
             * we need to disassociate the host reg from the guest reg.
             */
            /** @todo would be nice to know if preserving the register is in any way helpful. */
            /* If the purpose is calculations, try to duplicate the register value as
               we'll be clobbering the shadow. */
            if (   enmIntendedUse == kIemNativeGstRegUse_Calculation
                && (  ~pReNative->Core.bmHstSimdRegs
                    & ~pReNative->Core.bmHstSimdRegsWithGstShadow
                    & (~IEMNATIVE_SIMD_REG_FIXED_MASK & IEMNATIVE_HST_SIMD_REG_MASK)))
            {
                uint8_t const idxRegNew = iemNativeSimdRegAllocTmpEx(pReNative, poff, fRegMask);

                *poff = iemNativeSimdRegAllocLoadVecRegFromVecRegSz(pReNative, *poff, enmGstSimdReg, idxRegNew, idxSimdReg, enmLoadSz);

                Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Duplicated %s for guest %s into %s for destructive calc\n",
                       g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName,
                       g_apszIemNativeHstSimdRegNames[idxRegNew]));
                idxSimdReg = idxRegNew;
            }
            /* If the current register matches the restrictions, go ahead and allocate
               it for the caller. */
            else if (fRegMask & RT_BIT_32(idxSimdReg))
            {
                pReNative->Core.bmHstSimdRegs |= RT_BIT_32(idxSimdReg);
                pReNative->Core.aHstSimdRegs[idxSimdReg].enmWhat = kIemNativeWhat_Tmp;
                if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
                {
                    if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
                        *poff = iemNativeSimdRegAllocLoadVecRegFromVecRegSz(pReNative, *poff, enmGstSimdReg, idxSimdReg, idxSimdReg, enmLoadSz);
                    else
                        iemNativeSimdRegSetValidLoadFlag(pReNative, idxSimdReg, enmLoadSz);
                    Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Reusing %s for guest %s %s\n",
                           g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName, s_pszIntendedUse[enmIntendedUse]));
                }
                else
                {
                    iemNativeSimdRegClearGstSimdRegShadowing(pReNative, idxSimdReg, *poff);
                    Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Grabbing %s for guest %s - destructive calc\n",
                           g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName));
                }
            }
            /* Otherwise, allocate a register that satisfies the caller and transfer
               the shadowing if compatible with the intended use.  (This basically
               means the caller wants a non-volatile register (RSP push/pop scenario).) */
            else
            {
                Assert(fNoVolatileRegs);
                uint8_t const idxRegNew = iemNativeSimdRegAllocTmpEx(pReNative, poff, fRegMask & ~RT_BIT_32(idxSimdReg),
                                                                     !fNoVolatileRegs
                                                                     && enmIntendedUse == kIemNativeGstRegUse_Calculation);
                *poff = iemNativeSimdRegAllocLoadVecRegFromVecRegSz(pReNative, *poff, enmGstSimdReg, idxRegNew, idxSimdReg, enmLoadSz);
                if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
                {
                    iemNativeSimdRegTransferGstSimdRegShadowing(pReNative, idxSimdReg, idxRegNew, enmGstSimdReg, *poff);
                    Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Transferring %s to %s for guest %s %s\n",
                           g_apszIemNativeHstSimdRegNames[idxSimdReg], g_apszIemNativeHstSimdRegNames[idxRegNew],
                           g_aGstSimdShadowInfo[enmGstSimdReg].pszName, s_pszIntendedUse[enmIntendedUse]));
                }
                else
                    Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Duplicated %s for guest %s into %s for destructive calc\n",
                           g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName,
                           g_apszIemNativeHstSimdRegNames[idxRegNew]));
                idxSimdReg = idxRegNew;
            }
        }
        else
        {
            /*
             * Oops. Shadowed guest register already allocated!
             *
             * Allocate a new register, copy the value and, if updating, the
             * guest shadow copy assignment to the new register.
             */
            AssertMsg(   enmIntendedUse != kIemNativeGstRegUse_ForUpdate
                      && enmIntendedUse != kIemNativeGstRegUse_ForFullWrite,
                      ("This shouldn't happen: idxSimdReg=%d enmGstSimdReg=%d enmIntendedUse=%s\n",
                       idxSimdReg, enmGstSimdReg, s_pszIntendedUse[enmIntendedUse]));

            /** @todo share register for readonly access. */
            uint8_t const idxRegNew = iemNativeSimdRegAllocTmpEx(pReNative, poff, fRegMask,
                                                                 enmIntendedUse == kIemNativeGstRegUse_Calculation);

            if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
                *poff = iemNativeSimdRegAllocLoadVecRegFromVecRegSz(pReNative, *poff, enmGstSimdReg, idxRegNew, idxSimdReg, enmLoadSz);
            else
                iemNativeSimdRegSetValidLoadFlag(pReNative, idxRegNew, enmLoadSz);

            if (   enmIntendedUse != kIemNativeGstRegUse_ForUpdate
                && enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
                Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Duplicated %s for guest %s into %s for %s\n",
                       g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName,
                       g_apszIemNativeHstSimdRegNames[idxRegNew], s_pszIntendedUse[enmIntendedUse]));
            else
            {
                iemNativeSimdRegTransferGstSimdRegShadowing(pReNative, idxSimdReg, idxRegNew, enmGstSimdReg, *poff);
                Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Moved %s for guest %s into %s for %s\n",
                       g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName,
                       g_apszIemNativeHstSimdRegNames[idxRegNew], s_pszIntendedUse[enmIntendedUse]));
            }
            idxSimdReg = idxRegNew;
        }
        Assert(RT_BIT_32(idxSimdReg) & fRegMask); /* See assumption in fNoVolatileRegs docs. */

#ifdef VBOX_STRICT
        /* Strict builds: Check that the value is correct. */
        if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
            *poff = iemNativeEmitGuestSimdRegValueCheck(pReNative, *poff, idxSimdReg, enmGstSimdReg, enmLoadSz);
#endif

        if (   enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
            || enmIntendedUse == kIemNativeGstRegUse_ForUpdate)
        {
# if defined(IEMNATIVE_WITH_TB_DEBUG_INFO) && defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK)
            iemNativeDbgInfoAddNativeOffset(pReNative, *poff);
            iemNativeDbgInfoAddGuestRegDirty(pReNative, true /*fSimdReg*/, enmGstSimdReg, idxSimdReg);
# endif

            if (enmLoadSz == kIemNativeGstSimdRegLdStSz_Low128)
                IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, enmGstSimdReg);
            else if (enmLoadSz == kIemNativeGstSimdRegLdStSz_High128)
                IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, enmGstSimdReg);
            else
            {
                Assert(enmLoadSz == kIemNativeGstSimdRegLdStSz_256);
                IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, enmGstSimdReg);
                IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, enmGstSimdReg);
            }
        }

        return idxSimdReg;
    }

    /*
     * Allocate a new register, load it with the guest value and designate it
     * as a copy of the guest register.
     */
    uint8_t const idxRegNew = iemNativeSimdRegAllocTmpEx(pReNative, poff, fRegMask, enmIntendedUse == kIemNativeGstRegUse_Calculation);

    if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
        *poff = iemNativeEmitLoadSimdRegWithGstShadowSimdReg(pReNative, *poff, idxRegNew, enmGstSimdReg, enmLoadSz);
    else
        iemNativeSimdRegSetValidLoadFlag(pReNative, idxRegNew, enmLoadSz);

    if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
        iemNativeSimdRegMarkAsGstSimdRegShadow(pReNative, idxRegNew, enmGstSimdReg, *poff);

    if (   enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
        || enmIntendedUse == kIemNativeGstRegUse_ForUpdate)
    {
# if defined(IEMNATIVE_WITH_TB_DEBUG_INFO) && defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK)
        iemNativeDbgInfoAddNativeOffset(pReNative, *poff);
        iemNativeDbgInfoAddGuestRegDirty(pReNative, true /*fSimdReg*/, enmGstSimdReg, idxRegNew);
# endif

        if (enmLoadSz == kIemNativeGstSimdRegLdStSz_Low128)
            IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, enmGstSimdReg);
        else if (enmLoadSz == kIemNativeGstSimdRegLdStSz_High128)
            IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, enmGstSimdReg);
        else
        {
            Assert(enmLoadSz == kIemNativeGstSimdRegLdStSz_256);
            IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_LO_U128(pReNative, enmGstSimdReg);
            IEMNATIVE_SIMD_REG_STATE_SET_DIRTY_HI_U128(pReNative, enmGstSimdReg);
        }
    }

    Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Allocated %s for guest %s %s\n",
           g_apszIemNativeHstSimdRegNames[idxRegNew], g_aGstSimdShadowInfo[enmGstSimdReg].pszName, s_pszIntendedUse[enmIntendedUse]));

    return idxRegNew;
}

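/* Example (illustrative sketch): acquiring a host register holding the full
   256-bit value of guest register ymm1 for an update, then releasing it once
   the updating code has been emitted:

       uint8_t const idxSimdReg = iemNativeSimdRegAllocTmpForGuestSimdReg(pReNative, &off, IEMNATIVEGSTSIMDREG_SIMD(1),
                                                                          kIemNativeGstSimdRegLdStSz_256,
                                                                          kIemNativeGstRegUse_ForUpdate);
       ... emit code modifying the register ...
       iemNativeSimdRegFreeTmp(pReNative, idxSimdReg);
*/
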
/**
 * Flushes guest SIMD register shadow copies held by a set of host registers.
 *
 * This is used whenever calling an external helper for ensuring that we don't carry on
 * with any guest shadows in volatile registers, as these will get corrupted by the caller.
 *
 * @param   pReNative       The native recompile state.
 * @param   fHstSimdRegs    Set of host SIMD registers to flush guest shadows for.
 */
DECLHIDDEN(void) iemNativeSimdRegFlushGuestShadowsByHostMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstSimdRegs) RT_NOEXCEPT
{
    /*
     * Reduce the mask by what's currently shadowed.
     */
    uint32_t const bmHstSimdRegsWithGstShadowOld = pReNative->Core.bmHstSimdRegsWithGstShadow;
    fHstSimdRegs &= bmHstSimdRegsWithGstShadowOld;
    if (fHstSimdRegs)
    {
        uint32_t const bmHstSimdRegsWithGstShadowNew = bmHstSimdRegsWithGstShadowOld & ~fHstSimdRegs;
        Log12(("iemNativeSimdRegFlushGuestShadowsByHostMask: flushing %#RX32 (%#RX32 -> %#RX32)\n",
               fHstSimdRegs, bmHstSimdRegsWithGstShadowOld, bmHstSimdRegsWithGstShadowNew));
        pReNative->Core.bmHstSimdRegsWithGstShadow = bmHstSimdRegsWithGstShadowNew;
        if (bmHstSimdRegsWithGstShadowNew)
        {
            /*
             * Partial (likely).
             */
            uint64_t fGstShadows = 0;
            do
            {
                unsigned const idxHstSimdReg = ASMBitFirstSetU32(fHstSimdRegs) - 1;
                Assert(!(pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxHstSimdReg)));
                Assert(   (pReNative->Core.bmGstSimdRegShadows & pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows)
                       == pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows);
                Assert(!(  (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
                         & pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows));

                fGstShadows |= pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows;
                pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows = 0;
                fHstSimdRegs &= ~RT_BIT_32(idxHstSimdReg);
            } while (fHstSimdRegs != 0);
            pReNative->Core.bmGstSimdRegShadows &= ~fGstShadows;
        }
        else
        {
            /*
             * Clear all.
             */
            do
            {
                unsigned const idxHstSimdReg = ASMBitFirstSetU32(fHstSimdRegs) - 1;
                Assert(!(pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxHstSimdReg)));
                Assert(   (pReNative->Core.bmGstSimdRegShadows & pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows)
                       == pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows);
                Assert(!(  (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
                         & pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows));

                pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows = 0;
                fHstSimdRegs &= ~RT_BIT_32(idxHstSimdReg);
            } while (fHstSimdRegs != 0);
            pReNative->Core.bmGstSimdRegShadows = 0;
        }
    }
}
#endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */

5606 | /*********************************************************************************************************************************
|
---|
5607 | * Code emitters for flushing pending guest register writes and sanity checks *
|
---|
5608 | *********************************************************************************************************************************/
|
---|
5609 |
|
---|
5610 | #ifdef VBOX_STRICT
|
---|
5611 | /**
|
---|
5612 | * Does internal register allocator sanity checks.
|
---|
5613 | */
|
---|
5614 | DECLHIDDEN(void) iemNativeRegAssertSanity(PIEMRECOMPILERSTATE pReNative)
|
---|
5615 | {
|
---|
5616 | /*
|
---|
5617 | * Iterate host registers building a guest shadowing set.
|
---|
5618 | */
|
---|
5619 | uint64_t bmGstRegShadows = 0;
|
---|
5620 | uint32_t bmHstRegsWithGstShadow = pReNative->Core.bmHstRegsWithGstShadow;
|
---|
5621 | AssertMsg(!(bmHstRegsWithGstShadow & IEMNATIVE_REG_FIXED_MASK), ("%#RX32\n", bmHstRegsWithGstShadow));
|
---|
5622 | while (bmHstRegsWithGstShadow)
|
---|
5623 | {
|
---|
5624 | unsigned const idxHstReg = ASMBitFirstSetU32(bmHstRegsWithGstShadow) - 1;
|
---|
5625 | Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
|
---|
5626 | bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
|
---|
5627 |
|
---|
5628 | uint64_t fThisGstRegShadows = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
|
---|
5629 | AssertMsg(fThisGstRegShadows != 0, ("idxHstReg=%d\n", idxHstReg));
|
---|
5630 | AssertMsg(fThisGstRegShadows < RT_BIT_64(kIemNativeGstReg_End), ("idxHstReg=%d %#RX64\n", idxHstReg, fThisGstRegShadows));
|
---|
5631 | bmGstRegShadows |= fThisGstRegShadows;
|
---|
5632 | while (fThisGstRegShadows)
|
---|
5633 | {
|
---|
5634 | unsigned const idxGstReg = ASMBitFirstSetU64(fThisGstRegShadows) - 1;
|
---|
5635 | fThisGstRegShadows &= ~RT_BIT_64(idxGstReg);
|
---|
5636 | AssertMsg(pReNative->Core.aidxGstRegShadows[idxGstReg] == idxHstReg,
|
---|
5637 | ("idxHstReg=%d aidxGstRegShadows[idxGstReg=%d]=%d\n",
|
---|
5638 | idxHstReg, idxGstReg, pReNative->Core.aidxGstRegShadows[idxGstReg]));
|
---|
5639 | }
|
---|
5640 | }
|
---|
5641 | AssertMsg(bmGstRegShadows == pReNative->Core.bmGstRegShadows,
|
---|
5642 | ("%RX64 vs %RX64; diff %RX64\n", bmGstRegShadows, pReNative->Core.bmGstRegShadows,
|
---|
5643 | bmGstRegShadows ^ pReNative->Core.bmGstRegShadows));
|
---|
5644 |
|
---|
5645 | /*
|
---|
5646 | * Now the other way around, checking the guest to host index array.
|
---|
5647 | */
|
---|
5648 | bmHstRegsWithGstShadow = 0;
|
---|
5649 | bmGstRegShadows = pReNative->Core.bmGstRegShadows;
|
---|
5650 | Assert(bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
|
---|
5651 | while (bmGstRegShadows)
|
---|
5652 | {
|
---|
5653 | unsigned const idxGstReg = ASMBitFirstSetU64(bmGstRegShadows) - 1;
|
---|
5654 | Assert(idxGstReg < RT_ELEMENTS(pReNative->Core.aidxGstRegShadows));
|
---|
5655 | bmGstRegShadows &= ~RT_BIT_64(idxGstReg);
|
---|
5656 |
|
---|
5657 | uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
|
---|
5658 | AssertMsg(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs), ("aidxGstRegShadows[%d]=%d\n", idxGstReg, idxHstReg));
|
---|
5659 | AssertMsg(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg),
|
---|
5660 | ("idxGstReg=%d idxHstReg=%d fGstRegShadows=%RX64\n",
|
---|
5661 | idxGstReg, idxHstReg, pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));
|
---|
5662 | bmHstRegsWithGstShadow |= RT_BIT_32(idxHstReg);
|
---|
5663 | }
|
---|
5664 | AssertMsg(bmHstRegsWithGstShadow == pReNative->Core.bmHstRegsWithGstShadow,
|
---|
5665 | ("%RX64 vs %RX64; diff %RX64\n", bmHstRegsWithGstShadow, pReNative->Core.bmHstRegsWithGstShadow,
|
---|
5666 | bmHstRegsWithGstShadow ^ pReNative->Core.bmHstRegsWithGstShadow));
|
---|
5667 | }
|
---|
5668 | #endif /* VBOX_STRICT */
|
---|


/**
 * Flushes any delayed guest register writes.
 *
 * This must be called prior to calling CImpl functions and any helpers that use
 * the guest state (like raising exceptions) and such.
 *
 * @note This function does not flush any shadowing information for guest registers. This needs to be done by
 *       the caller if it wishes to do so.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeRegFlushPendingWritesSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t fGstShwExcept, uint64_t fGstSimdShwExcept)
{
#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    if (!(fGstShwExcept & RT_BIT_64(kIemNativeGstReg_Pc)))
        off = iemNativeEmitPcWriteback(pReNative, off);
#else
    RT_NOREF(pReNative, fGstShwExcept);
#endif

#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    off = iemNativeRegFlushDirtyGuest(pReNative, off, ~fGstShwExcept);
#endif

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    off = iemNativeSimdRegFlushDirtyGuest(pReNative, off, ~fGstSimdShwExcept);
#endif

    return off;
}

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING

# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG

/**
 * Checks if the value in @a idxPcReg matches IEMCPU::uPcUpdatingDebug.
 */
DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcDebugCheckWithReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxPcReg)
{
    Assert(idxPcReg != IEMNATIVE_REG_FIXED_TMP0);
    Assert(pReNative->Core.fDebugPcInitialized);

    /* cmp pcreg, [pVCpu->iem.s.uPcUpdatingDebug] */
#  ifdef RT_ARCH_AMD64
    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
    pCodeBuf[off++] = X86_OP_REX_W | (idxPcReg >= 8 ? X86_OP_REX_R : 0);
    pCodeBuf[off++] = 0x3b;
    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, idxPcReg & 7, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
#  else
    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, IEMNATIVE_REG_FIXED_TMP0, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
    off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, IEMNATIVE_REG_FIXED_TMP0, idxPcReg);
#  endif

    uint32_t offFixup = off;
    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 1, kIemNativeInstrCond_e);
    off = iemNativeEmitBrkEx(pCodeBuf, off, UINT32_C(0x2200));
    iemNativeFixupFixedJump(pReNative, offFixup, off);

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}
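
/* For reference, the check emitted above boils down to this sequence on
 * AMD64 (on ARM64 uPcUpdatingDebug is first loaded into TMP0 and compared):
 *      cmp     pcreg, [pVCpu + RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug)]
 *      je      .ok
 *      int3                ; brk #0x2200 on ARM64
 *  .ok:
 */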


/**
 * Checks that the current RIP+offPc matches IEMCPU::uPcUpdatingDebug.
 */
DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcDebugCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    if (pReNative->Core.fDebugPcInitialized)
    {
        uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc);
        if (pReNative->Core.offPc)
        {
            uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
            PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, RT_ARCH_VAL == RT_ARCH_VAL_AMD64 ? 32 : 8);
            off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off, idxTmpReg, idxPcReg, pReNative->Core.offPc);
            IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
            off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxTmpReg);
            iemNativeRegFreeTmp(pReNative, idxTmpReg);
        }
        else
            off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
        iemNativeRegFreeTmp(pReNative, idxPcReg);
    }
    return off;
}

# endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG */

/**
 * Emits code to update the guest RIP value by adding the current offset since the start of the last RIP update.
 */
DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcWritebackSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    Assert(pReNative->Core.offPc);
# if !defined(IEMNATIVE_WITH_TB_DEBUG_INFO) && !defined(VBOX_WITH_STATISTICS)
    Log4(("iemNativeEmitPcWritebackSlow: offPc=%#RX64 -> 0; off=%#x\n", pReNative->Core.offPc, off));
# else
    uint8_t const idxOldInstrPlusOne = pReNative->idxInstrPlusOneOfLastPcUpdate;
    uint8_t       idxCurCall         = pReNative->idxCurCall;
    uint8_t       idxInstr           = pReNative->pTbOrg->Thrd.paCalls[idxCurCall].idxInstr; /* unreliable */
    while (idxInstr == 0 && idxInstr + 1 < idxOldInstrPlusOne && idxCurCall > 0)
        idxInstr = pReNative->pTbOrg->Thrd.paCalls[--idxCurCall].idxInstr;
    pReNative->idxInstrPlusOneOfLastPcUpdate = RT_MAX(idxInstr + 1, idxOldInstrPlusOne);
    uint8_t const cInstrsSkipped = idxInstr <= idxOldInstrPlusOne ? 0 : idxInstr - idxOldInstrPlusOne;
    Log4(("iemNativeEmitPcWritebackSlow: offPc=%#RX64 -> 0; off=%#x; idxInstr=%u cInstrsSkipped=%u\n",
          pReNative->Core.offPc, off, idxInstr, cInstrsSkipped));

    STAM_COUNTER_ADD(&pReNative->pVCpu->iem.s.StatNativePcUpdateDelayed, cInstrsSkipped);

#  ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    iemNativeDbgInfoAddNativeOffset(pReNative, off);
    iemNativeDbgInfoAddDelayedPcUpdate(pReNative, pReNative->Core.offPc, cInstrsSkipped);
#  endif
# endif

# ifndef IEMNATIVE_REG_FIXED_PC_DBG
    /* Allocate a temporary PC register. */
    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);

    /* Perform the addition and store the result. */
    off = iemNativeEmitAddGprImm(pReNative, off, idxPcReg, pReNative->Core.offPc);
    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
#  ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
    off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
#  endif

    /* Free but don't flush the PC register. */
    iemNativeRegFreeTmp(pReNative, idxPcReg);
# else
    /* Compare the shadow with the context value, they should match. */
    off = iemNativeEmitAddGprImm(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, pReNative->Core.offPc);
    off = iemNativeEmitGuestRegValueCheck(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, kIemNativeGstReg_Pc);
# endif

    pReNative->Core.offPc = 0;

    return off;
}
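
/* The writeback above amounts to the following, with offPc being reset
 * afterwards so a new accumulation period starts (AMD64 flavour shown,
 * ARM64 is analogous):
 *      add     pcreg, offPc
 *      mov     [pVCpu + RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip)], pcreg
 */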

#endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING */


/*********************************************************************************************************************************
*   Code Emitters (larger snippets)                                                                                              *
*********************************************************************************************************************************/

/**
 * Loads the guest shadow register @a enmGstReg into host reg @a idxHstReg, zero
 * extending to 64-bit width.
 *
 * @returns New code buffer offset on success, UINT32_MAX on failure.
 * @param   pReNative   The recompiler state.
 * @param   off         The current code buffer position.
 * @param   idxHstReg   The host register to load the guest register value into.
 * @param   enmGstReg   The guest register to load.
 *
 * @note This does not mark @a idxHstReg as having a shadow copy of @a enmGstReg,
 *       that is something the caller needs to do if applicable.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitLoadGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg)
{
    Assert((unsigned)enmGstReg < (unsigned)kIemNativeGstReg_End);
    Assert(g_aGstShadowInfo[enmGstReg].cb != 0);

    switch (g_aGstShadowInfo[enmGstReg].cb)
    {
        case sizeof(uint64_t):
            return iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
        case sizeof(uint32_t):
            return iemNativeEmitLoadGprFromVCpuU32(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
        case sizeof(uint16_t):
            return iemNativeEmitLoadGprFromVCpuU16(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
#if 0 /* not present in the table. */
        case sizeof(uint8_t):
            return iemNativeEmitLoadGprFromVCpuU8(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
#endif
        default:
            AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IPE_NOT_REACHED_DEFAULT_CASE));
    }
}
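
/* Usage sketch, mirroring the ARM64 path of iemNativeEmitGuestRegValueCheck
 * further down (loads the CPUMCTX copy without touching the shadowing
 * bookkeeping; kIemNativeGstReg_Pc is just an illustrative choice here):
 *      off = iemNativeEmitLoadGprWithGstShadowReg(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, kIemNativeGstReg_Pc);
 */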


#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/**
 * Loads the guest shadow SIMD register @a enmGstSimdReg into host SIMD reg @a idxHstSimdReg.
 *
 * @returns New code buffer offset on success, UINT32_MAX on failure.
 * @param   pReNative       The recompiler state.
 * @param   off             The current code buffer position.
 * @param   idxHstSimdReg   The host register to load the guest register value into.
 * @param   enmGstSimdReg   The guest register to load.
 * @param   enmLoadSz       The load size of the register.
 *
 * @note This does not mark @a idxHstSimdReg as having a shadow copy of @a enmGstSimdReg,
 *       that is something the caller needs to do if applicable.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitLoadSimdRegWithGstShadowSimdReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstSimdReg,
                                             IEMNATIVEGSTSIMDREG enmGstSimdReg, IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz)
{
    Assert((unsigned)enmGstSimdReg < RT_ELEMENTS(g_aGstSimdShadowInfo));

    iemNativeSimdRegSetValidLoadFlag(pReNative, idxHstSimdReg, enmLoadSz);
    switch (enmLoadSz)
    {
        case kIemNativeGstSimdRegLdStSz_256:
            off = iemNativeEmitSimdLoadVecRegFromVCpuLowU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offXmm);
            return iemNativeEmitSimdLoadVecRegFromVCpuHighU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offYmm);
        case kIemNativeGstSimdRegLdStSz_Low128:
            return iemNativeEmitSimdLoadVecRegFromVCpuLowU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offXmm);
        case kIemNativeGstSimdRegLdStSz_High128:
            return iemNativeEmitSimdLoadVecRegFromVCpuHighU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offYmm);
        default:
            AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IPE_NOT_REACHED_DEFAULT_CASE));
    }
}
#endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */

#ifdef VBOX_STRICT

/**
 * Emitting code that checks that the value of @a idxReg is UINT32_MAX or less.
 *
 * @note May of course trash IEMNATIVE_REG_FIXED_TMP0.
 *       Trashes EFLAGS on AMD64.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitTop32BitsClearCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxReg)
{
# ifdef RT_ARCH_AMD64
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);

    /* rol reg64, 32 */
    pbCodeBuf[off++] = X86_OP_REX_W | (idxReg < 8 ? 0 : X86_OP_REX_B);
    pbCodeBuf[off++] = 0xc1;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxReg & 7);
    pbCodeBuf[off++] = 32;

    /* test reg32, ffffffffh */
    if (idxReg >= 8)
        pbCodeBuf[off++] = X86_OP_REX_B;
    pbCodeBuf[off++] = 0xf7;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxReg & 7);
    pbCodeBuf[off++] = 0xff;
    pbCodeBuf[off++] = 0xff;
    pbCodeBuf[off++] = 0xff;
    pbCodeBuf[off++] = 0xff;

    /* je/jz +1 */
    pbCodeBuf[off++] = 0x74;
    pbCodeBuf[off++] = 0x01;

    /* int3 */
    pbCodeBuf[off++] = 0xcc;

    /* rol reg64, 32 */
    pbCodeBuf[off++] = X86_OP_REX_W | (idxReg < 8 ? 0 : X86_OP_REX_B);
    pbCodeBuf[off++] = 0xc1;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxReg & 7);
    pbCodeBuf[off++] = 32;

# elif defined(RT_ARCH_ARM64)
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
    /* lsr tmp0, reg64, #32 */
    pu32CodeBuf[off++] = Armv8A64MkInstrLsrImm(IEMNATIVE_REG_FIXED_TMP0, idxReg, 32);
    /* cbz tmp0, +1 */
    pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, IEMNATIVE_REG_FIXED_TMP0);
    /* brk #0x1100 */
    pu32CodeBuf[off++] = Armv8A64MkInstrBrk(UINT32_C(0x1100));

# else
#  error "Port me!"
# endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emitting code that checks that the content of register @a idxReg is the same
 * as what's in the guest register @a enmGstReg, resulting in a breakpoint
 * instruction if that's not the case.
 *
 * @note May of course trash IEMNATIVE_REG_FIXED_TMP0.
 *       Trashes EFLAGS on AMD64.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitGuestRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxReg, IEMNATIVEGSTREG enmGstReg)
{
# if defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK)
    /* We can't check the value against what's in CPUMCTX if the register is already marked as dirty, so skip the check. */
    if (pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(enmGstReg))
        return off;
# endif

# ifdef RT_ARCH_AMD64
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);

    /* cmp reg, [mem] */
    if (g_aGstShadowInfo[enmGstReg].cb == sizeof(uint8_t))
    {
        if (idxReg >= 8)
            pbCodeBuf[off++] = X86_OP_REX_R;
        pbCodeBuf[off++] = 0x38;
    }
    else
    {
        if (g_aGstShadowInfo[enmGstReg].cb == sizeof(uint64_t))
            pbCodeBuf[off++] = X86_OP_REX_W | (idxReg < 8 ? 0 : X86_OP_REX_R);
        else
        {
            if (g_aGstShadowInfo[enmGstReg].cb == sizeof(uint16_t))
                pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
            else
                AssertStmt(g_aGstShadowInfo[enmGstReg].cb == sizeof(uint32_t),
                           IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_6));
            if (idxReg >= 8)
                pbCodeBuf[off++] = X86_OP_REX_R;
        }
        pbCodeBuf[off++] = 0x39;
    }
    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, idxReg, g_aGstShadowInfo[enmGstReg].off);

    /* je/jz +1 */
    pbCodeBuf[off++] = 0x74;
    pbCodeBuf[off++] = 0x01;

    /* int3 */
    pbCodeBuf[off++] = 0xcc;

    /* For values smaller than the register size, we must check that the rest
       of the register is all zeros. */
    if (g_aGstShadowInfo[enmGstReg].cb < sizeof(uint32_t))
    {
        /* test reg64, imm32 */
        pbCodeBuf[off++] = X86_OP_REX_W | (idxReg < 8 ? 0 : X86_OP_REX_B);
        pbCodeBuf[off++] = 0xf7;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxReg & 7);
        pbCodeBuf[off++] = 0;
        pbCodeBuf[off++] = g_aGstShadowInfo[enmGstReg].cb > sizeof(uint8_t) ? 0 : 0xff;
        pbCodeBuf[off++] = 0xff;
        pbCodeBuf[off++] = 0xff;

        /* je/jz +1 */
        pbCodeBuf[off++] = 0x74;
        pbCodeBuf[off++] = 0x01;

        /* int3 */
        pbCodeBuf[off++] = 0xcc;
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    }
    else
    {
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
        if (g_aGstShadowInfo[enmGstReg].cb == sizeof(uint32_t))
            off = iemNativeEmitTop32BitsClearCheck(pReNative, off, idxReg);
    }

# elif defined(RT_ARCH_ARM64)
    /* mov TMP0, [gstreg] */
    off = iemNativeEmitLoadGprWithGstShadowReg(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, enmGstReg);

    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
    /* sub tmp0, tmp0, idxReg */
    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, IEMNATIVE_REG_FIXED_TMP0, IEMNATIVE_REG_FIXED_TMP0, idxReg);
    /* cbz tmp0, +1 */
    pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, IEMNATIVE_REG_FIXED_TMP0);
    /* brk #0x1000+enmGstReg */
    pu32CodeBuf[off++] = Armv8A64MkInstrBrk((uint32_t)enmGstReg | UINT32_C(0x1000));
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

# else
#  error "Port me!"
# endif
    return off;
}
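
/* In short, the AMD64 flavour of the check above is:
 *      cmp     reg, [pVCpu + g_aGstShadowInfo[enmGstReg].off]
 *      je      .ok
 *      int3
 *  .ok:                    ; plus a zero-upper-bits test for sub-64-bit values
 * while ARM64 loads the CPUMCTX value into TMP0 and uses sub + cbz + brk.
 */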


# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
#  ifdef RT_ARCH_AMD64
/**
 * Helper for AMD64 to emit code which checks the low 128-bits of the given SIMD register against the given vCPU offset.
 */
DECL_FORCE_INLINE_THROW(uint32_t) iemNativeEmitGuestSimdRegValueCheckVCpuU128(uint8_t * const pbCodeBuf, uint32_t off, uint8_t idxSimdReg, uint32_t offVCpu)
{
    /* pcmpeqq vectmp0, [gstreg] (ASSUMES SSE4.1) */
    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    if (idxSimdReg >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0x38;
    pbCodeBuf[off++] = 0x29;
    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, idxSimdReg, offVCpu);

    /* pextrq tmp0, vectmp0, #0 (ASSUMES SSE4.1). */
    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    pbCodeBuf[off++] = X86_OP_REX_W
                     | (idxSimdReg < 8 ? 0 : X86_OP_REX_R)
                     | (IEMNATIVE_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_B);
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0x3a;
    pbCodeBuf[off++] = 0x16;
    pbCodeBuf[off++] = 0xeb;
    pbCodeBuf[off++] = 0x00;

    /* cmp tmp0, 0xffffffffffffffff. */
    pbCodeBuf[off++] = X86_OP_REX_W | (IEMNATIVE_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_B);
    pbCodeBuf[off++] = 0x83;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, IEMNATIVE_REG_FIXED_TMP0 & 7);
    pbCodeBuf[off++] = 0xff;

    /* je/jz +1 */
    pbCodeBuf[off++] = 0x74;
    pbCodeBuf[off++] = 0x01;

    /* int3 */
    pbCodeBuf[off++] = 0xcc;

    /* pextrq tmp0, vectmp0, #1 (ASSUMES SSE4.1). */
    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    pbCodeBuf[off++] = X86_OP_REX_W
                     | (idxSimdReg < 8 ? 0 : X86_OP_REX_R)
                     | (IEMNATIVE_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_B);
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0x3a;
    pbCodeBuf[off++] = 0x16;
    pbCodeBuf[off++] = 0xeb;
    pbCodeBuf[off++] = 0x01;

    /* cmp tmp0, 0xffffffffffffffff. */
    pbCodeBuf[off++] = X86_OP_REX_W | (IEMNATIVE_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_B);
    pbCodeBuf[off++] = 0x83;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, IEMNATIVE_REG_FIXED_TMP0 & 7);
    pbCodeBuf[off++] = 0xff;

    /* je/jz +1 */
    pbCodeBuf[off++] = 0x74;
    pbCodeBuf[off++] = 0x01;

    /* int3 */
    pbCodeBuf[off++] = 0xcc;

    return off;
}
#  endif


/**
 * Emitting code that checks that the content of SIMD register @a idxSimdReg is the same
 * as what's in the guest register @a enmGstSimdReg, resulting in a breakpoint
 * instruction if that's not the case.
 *
 * @note May of course trash IEMNATIVE_SIMD_REG_FIXED_TMP0 and IEMNATIVE_REG_FIXED_TMP0.
 *       Trashes EFLAGS on AMD64.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitGuestSimdRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxSimdReg,
                                    IEMNATIVEGSTSIMDREG enmGstSimdReg, IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz)
{
    /* We can't check the value against what's in CPUMCTX if the register is already marked as dirty, so skip the check. */
    if (   (   enmLoadSz == kIemNativeGstSimdRegLdStSz_256
            && (   IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_LO_U128(pReNative, enmGstSimdReg)
                || IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_HI_U128(pReNative, enmGstSimdReg)))
        || (   enmLoadSz == kIemNativeGstSimdRegLdStSz_Low128
            && IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_LO_U128(pReNative, enmGstSimdReg))
        || (   enmLoadSz == kIemNativeGstSimdRegLdStSz_High128
            && IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_HI_U128(pReNative, enmGstSimdReg)))
        return off;

#  ifdef RT_ARCH_AMD64
    if (enmLoadSz == kIemNativeGstSimdRegLdStSz_Low128 || enmLoadSz == kIemNativeGstSimdRegLdStSz_256)
    {
        /* movdqa vectmp0, idxSimdReg */
        off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, IEMNATIVE_SIMD_REG_FIXED_TMP0, idxSimdReg);

        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 44);

        off = iemNativeEmitGuestSimdRegValueCheckVCpuU128(pbCodeBuf, off, IEMNATIVE_SIMD_REG_FIXED_TMP0,
                                                          g_aGstSimdShadowInfo[enmGstSimdReg].offXmm);
    }

    if (enmLoadSz == kIemNativeGstSimdRegLdStSz_High128 || enmLoadSz == kIemNativeGstSimdRegLdStSz_256)
    {
        /* Because CPUMCTX stores the high 128 bits separately, we have to do this all over again for the high part. */
        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 50);

        /* vextracti128 vectmp0, idxSimdReg, 1 */
        pbCodeBuf[off++] = X86_OP_VEX3;
        pbCodeBuf[off++] = (idxSimdReg < 8 ? X86_OP_VEX3_BYTE1_R : 0)
                         | X86_OP_VEX3_BYTE1_X
                         | (IEMNATIVE_SIMD_REG_FIXED_TMP0 < 8 ? X86_OP_VEX3_BYTE1_B : 0)
                         | 0x03; /* Opcode map */
        pbCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false /*f64BitOpSz*/, true /*f256BitAvx*/, X86_OP_VEX3_BYTE2_P_066H);
        pbCodeBuf[off++] = 0x39;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxSimdReg & 7, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7);
        pbCodeBuf[off++] = 0x01;

        off = iemNativeEmitGuestSimdRegValueCheckVCpuU128(pbCodeBuf, off, IEMNATIVE_SIMD_REG_FIXED_TMP0,
                                                          g_aGstSimdShadowInfo[enmGstSimdReg].offYmm);
    }
#  elif defined(RT_ARCH_ARM64)
    /* mov vectmp0, [gstreg] */
    off = iemNativeEmitLoadSimdRegWithGstShadowSimdReg(pReNative, off, IEMNATIVE_SIMD_REG_FIXED_TMP0, enmGstSimdReg, enmLoadSz);

    if (enmLoadSz == kIemNativeGstSimdRegLdStSz_Low128 || enmLoadSz == kIemNativeGstSimdRegLdStSz_256)
    {
        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
        /* eor vectmp0, vectmp0, idxSimdReg */
        pu32CodeBuf[off++] = Armv8A64MkVecInstrEor(IEMNATIVE_SIMD_REG_FIXED_TMP0, IEMNATIVE_SIMD_REG_FIXED_TMP0, idxSimdReg);
        /* uaddlv vectmp0, vectmp0.16B */
        pu32CodeBuf[off++] = Armv8A64MkVecInstrUAddLV(IEMNATIVE_SIMD_REG_FIXED_TMP0, IEMNATIVE_SIMD_REG_FIXED_TMP0, kArmv8InstrUAddLVSz_16B);
        /* umov tmp0, vectmp0.H[0] */
        pu32CodeBuf[off++] = Armv8A64MkVecInstrUmov(IEMNATIVE_REG_FIXED_TMP0, IEMNATIVE_SIMD_REG_FIXED_TMP0,
                                                    0 /*idxElem*/, kArmv8InstrUmovInsSz_U16, false /*f64Bit*/);
        /* cbz tmp0, +1 */
        pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, IEMNATIVE_REG_FIXED_TMP0);
        /* brk #0x1000+enmGstSimdReg */
        pu32CodeBuf[off++] = Armv8A64MkInstrBrk((uint32_t)enmGstSimdReg | UINT32_C(0x1000));
    }

    if (enmLoadSz == kIemNativeGstSimdRegLdStSz_High128 || enmLoadSz == kIemNativeGstSimdRegLdStSz_256)
    {
        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
        /* eor vectmp0 + 1, vectmp0 + 1, idxSimdReg + 1 */
        pu32CodeBuf[off++] = Armv8A64MkVecInstrEor(IEMNATIVE_SIMD_REG_FIXED_TMP0 + 1, IEMNATIVE_SIMD_REG_FIXED_TMP0 + 1, idxSimdReg + 1);
        /* uaddlv vectmp0 + 1, (vectmp0 + 1).16B */
        pu32CodeBuf[off++] = Armv8A64MkVecInstrUAddLV(IEMNATIVE_SIMD_REG_FIXED_TMP0 + 1, IEMNATIVE_SIMD_REG_FIXED_TMP0 + 1, kArmv8InstrUAddLVSz_16B);
        /* umov tmp0, (vectmp0 + 1).H[0] */
        pu32CodeBuf[off++] = Armv8A64MkVecInstrUmov(IEMNATIVE_REG_FIXED_TMP0, IEMNATIVE_SIMD_REG_FIXED_TMP0 + 1,
                                                    0 /*idxElem*/, kArmv8InstrUmovInsSz_U16, false /*f64Bit*/);
        /* cbz tmp0, +1 */
        pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, IEMNATIVE_REG_FIXED_TMP0);
        /* brk #0x1000+enmGstSimdReg */
        pu32CodeBuf[off++] = Armv8A64MkInstrBrk((uint32_t)enmGstSimdReg | UINT32_C(0x1000));
    }

#  else
#   error "Port me!"
#  endif

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}
# endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */


/**
 * Emitting code that checks that IEMCPU::fExec matches @a fExec for all
 * important bits.
 *
 * @note May of course trash IEMNATIVE_REG_FIXED_TMP0.
 *       Trashes EFLAGS on AMD64.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitExecFlagsCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fExec)
{
    uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off);
    off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.fExec));
    off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegTmp, IEMTB_F_IEM_F_MASK & IEMTB_F_KEY_MASK);
    off = iemNativeEmitCmpGpr32WithImm(pReNative, off, idxRegTmp, fExec & IEMTB_F_KEY_MASK);

# ifdef RT_ARCH_AMD64
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);

    /* je/jz +1 */
    pbCodeBuf[off++] = 0x74;
    pbCodeBuf[off++] = 0x01;

    /* int3 */
    pbCodeBuf[off++] = 0xcc;

# elif defined(RT_ARCH_ARM64)
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);

    /* b.eq +1 */
    pu32CodeBuf[off++] = Armv8A64MkInstrBCond(kArmv8InstrCond_Eq, 2);
    /* brk #0x2000 */
    pu32CodeBuf[off++] = Armv8A64MkInstrBrk(UINT32_C(0x2000));

# else
#  error "Port me!"
# endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    iemNativeRegFreeTmp(pReNative, idxRegTmp);
    return off;
}
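
/* The emitted check reduces to:
 *      mov     tmpreg, [pVCpu + RT_UOFFSETOF(VMCPUCC, iem.s.fExec)]
 *      and     tmpreg, IEMTB_F_IEM_F_MASK & IEMTB_F_KEY_MASK
 *      cmp     tmpreg, fExec & IEMTB_F_KEY_MASK
 *      je      .ok
 *      int3                ; brk #0x2000 on ARM64
 *  .ok:
 */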

#endif /* VBOX_STRICT */


#ifdef IEMNATIVE_STRICT_EFLAGS_SKIPPING
/**
 * Worker for IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitEFlagsSkippingCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fEflNeeded)
{
    uint32_t const offVCpu = RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags);

    fEflNeeded &= X86_EFL_STATUS_BITS;
    if (fEflNeeded)
    {
# ifdef RT_ARCH_AMD64
        /* test dword [pVCpu + offVCpu], imm32 */
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 13);
        if (fEflNeeded <= 0xff)
        {
            pCodeBuf[off++] = 0xf6;
            off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 0, offVCpu);
            pCodeBuf[off++] = RT_BYTE1(fEflNeeded);
        }
        else
        {
            pCodeBuf[off++] = 0xf7;
            off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 0, offVCpu);
            pCodeBuf[off++] = RT_BYTE1(fEflNeeded);
            pCodeBuf[off++] = RT_BYTE2(fEflNeeded);
            pCodeBuf[off++] = RT_BYTE3(fEflNeeded);
            pCodeBuf[off++] = RT_BYTE4(fEflNeeded);
        }

        off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 3, kIemNativeInstrCond_e);
        pCodeBuf[off++] = 0xcc;

        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

# else
        uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off);
        off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, idxRegTmp, offVCpu);
        off = iemNativeEmitTestAnyBitsInGpr(pReNative, off, idxRegTmp, fEflNeeded);
#  ifdef RT_ARCH_ARM64
        off = iemNativeEmitJzToFixed(pReNative, off, off + 2);
        off = iemNativeEmitBrk(pReNative, off, 0x7777);
#  else
#   error "Port me!"
#  endif
        iemNativeRegFreeTmp(pReNative, idxRegTmp);
# endif
    }
    return off;
}
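
/* The check emitted above is essentially:
 *      test    dword [pVCpu + RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags)], fEflNeeded
 *      jz      .ok
 *      int3                ; brk #0x7777 on ARM64
 *  .ok:
 * i.e. we trap whenever a status flag the current code needs was produced
 * by a skipped EFLAGS calculation.
 */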
#endif /* IEMNATIVE_STRICT_EFLAGS_SKIPPING */


/**
 * Emits code for checking the return code of a call and rcPassUp, returning
 * from the code if either is non-zero.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr)
{
#ifdef RT_ARCH_AMD64
    /*
     * AMD64: eax = call status code.
     */

    /* edx = rcPassUp */
    off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, X86_GREG_xDX, RT_UOFFSETOF(VMCPUCC, iem.s.rcPassUp));
# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    off = iemNativeEmitLoadGpr8Imm(pReNative, off, X86_GREG_xCX, idxInstr);
# endif

    /* edx = eax | rcPassUp */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
    pbCodeBuf[off++] = 0x0b;                    /* or edx, eax */
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xDX, X86_GREG_xAX);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    /* Jump to non-zero status return path. */
    off = iemNativeEmitJnzTbExit(pReNative, off, kIemNativeLabelType_NonZeroRetOrPassUp);

    /* done. */

#elif defined(RT_ARCH_ARM64)
    /*
     * ARM64: w0 = call status code.
     */
# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    off = iemNativeEmitLoadGprImm64(pReNative, off, ARMV8_A64_REG_X2, idxInstr);
# endif
    off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, ARMV8_A64_REG_X3, RT_UOFFSETOF(VMCPUCC, iem.s.rcPassUp));

    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);

    pu32CodeBuf[off++] = Armv8A64MkInstrOrr(ARMV8_A64_REG_X4, ARMV8_A64_REG_X3, ARMV8_A64_REG_X0, false /*f64Bit*/);

    off = iemNativeEmitTestIfGprIsNotZeroAndTbExitEx(pReNative, pu32CodeBuf, off, ARMV8_A64_REG_X4, true /*f64Bit*/,
                                                     kIemNativeLabelType_NonZeroRetOrPassUp);

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    RT_NOREF_PV(idxInstr);
    return off;
}
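
/* Net effect of the above on AMD64:
 *      or      edx, eax    ; edx = rcPassUp, eax = call status
 *      jnz     NonZeroRetOrPassUp
 * so the expensive exit path is only taken when either value is non-zero;
 * ARM64 does orr w4, w3, w0 followed by a non-zero test that exits the TB.
 */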


/**
 * Emits a call to a CImpl function or something similar.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitCImplCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr, uint64_t fGstShwFlush, uintptr_t pfnCImpl,
                       uint8_t cbInstr, uint8_t cAddParams, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
{
    /* Writeback everything. */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

    /*
     * Flush stuff. PC and EFlags are implicitly flushed, the latter because we
     * don't do with/without flags variants of defer-to-cimpl stuff at the moment.
     */
    fGstShwFlush = iemNativeCImplFlagsToGuestShadowFlushMask(pReNative->fCImpl,
                                                             fGstShwFlush
                                                             | RT_BIT_64(kIemNativeGstReg_Pc)
                                                             | RT_BIT_64(kIemNativeGstReg_EFlags));
    iemNativeRegFlushGuestShadows(pReNative, fGstShwFlush);

    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 4);

    /*
     * Load the parameters.
     */
#if defined(RT_OS_WINDOWS) && defined(VBOXSTRICTRC_STRICT_ENABLED)
    /* Special-case the hidden VBOXSTRICTRC pointer. */
    off = iemNativeEmitLoadGprFromGpr(  pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
    off = iemNativeEmitLoadGprImm64(    pReNative, off, IEMNATIVE_CALL_ARG2_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
    if (cAddParams > 0)
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam0);
    if (cAddParams > 1)
        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam1);
    if (cAddParams > 2)
        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG1, uParam2);
    off = iemNativeEmitLeaGprByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */

#else
    AssertCompile(IEMNATIVE_CALL_ARG_GREG_COUNT >= 4);
    off = iemNativeEmitLoadGprFromGpr(  pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
    off = iemNativeEmitLoadGprImm64(    pReNative, off, IEMNATIVE_CALL_ARG1_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
    if (cAddParams > 0)
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, uParam0);
    if (cAddParams > 1)
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam1);
    if (cAddParams > 2)
# if IEMNATIVE_CALL_ARG_GREG_COUNT >= 5
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG4_GREG, uParam2);
# else
        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam2);
# endif
#endif

    /*
     * Make the call.
     */
    off = iemNativeEmitCallImm(pReNative, off, pfnCImpl);

#if defined(RT_ARCH_AMD64) && defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS)
    off = iemNativeEmitLoadGprByBpU32(pReNative, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
#endif

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
    pReNative->Core.fDebugPcInitialized = false;
    Log4(("fDebugPcInitialized=false cimpl off=%#x (v2)\n", off));
#endif

    /*
     * Check the status code.
     */
    return iemNativeEmitCheckCallRetAndPassUp(pReNative, off, idxInstr);
}
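
/* Hypothetical usage sketch; iemCImpl_SomeWorker and the argument values
 * are made up for illustration, real callers pass one of the iemCImpl_*
 * workers together with up to three additional parameters:
 *      off = iemNativeEmitCImplCall(pReNative, off, idxInstr, fGstShwFlush,
 *                                   (uintptr_t)iemCImpl_SomeWorker, cbInstr,
 *                                   1 /*cAddParams*/, uParam0, 0, 0);
 */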


/**
 * Emits a call to a threaded worker function.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitThreadedCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
{
    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, X86_EFL_STATUS_BITS);

    /* We don't know what the threaded function is doing so we must flush all pending writes. */
    off = iemNativeRegFlushPendingWrites(pReNative, off);

    iemNativeRegFlushGuestShadows(pReNative, UINT64_MAX); /** @todo optimize this */
    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 4);

#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
    /* The threaded function may throw / long jmp, so set current instruction
       number if we're counting. */
    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, pCallEntry->idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
#endif

    uint8_t const cParams = g_acIemThreadedFunctionUsedArgs[pCallEntry->enmFunction];

#ifdef RT_ARCH_AMD64
    /* Load the parameters and emit the call. */
# ifdef RT_OS_WINDOWS
#  ifndef VBOXSTRICTRC_STRICT_ENABLED
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xCX, IEMNATIVE_REG_FIXED_PVMCPU);
    if (cParams > 0)
        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xDX, pCallEntry->auParams[0]);
    if (cParams > 1)
        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x8, pCallEntry->auParams[1]);
    if (cParams > 2)
        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x9, pCallEntry->auParams[2]);
#  else  /* VBOXSTRICTRC: Returned via hidden parameter. Sigh. */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, IEMNATIVE_REG_FIXED_PVMCPU);
    if (cParams > 0)
        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x8, pCallEntry->auParams[0]);
    if (cParams > 1)
        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x9, pCallEntry->auParams[1]);
    if (cParams > 2)
    {
        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x10, pCallEntry->auParams[2]);
        off = iemNativeEmitStoreGprByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, X86_GREG_x10);
    }
    off = iemNativeEmitLeaGprByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
#  endif /* VBOXSTRICTRC_STRICT_ENABLED */
# else
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDI, IEMNATIVE_REG_FIXED_PVMCPU);
    if (cParams > 0)
        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xSI, pCallEntry->auParams[0]);
    if (cParams > 1)
        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xDX, pCallEntry->auParams[1]);
    if (cParams > 2)
        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xCX, pCallEntry->auParams[2]);
# endif

    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)g_apfnIemThreadedFunctions[pCallEntry->enmFunction]);

# if defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS)
    off = iemNativeEmitLoadGprByBpU32(pReNative, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
# endif

#elif defined(RT_ARCH_ARM64)
    /*
     * ARM64:
     */
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
    if (cParams > 0)
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, pCallEntry->auParams[0]);
    if (cParams > 1)
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, pCallEntry->auParams[1]);
    if (cParams > 2)
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, pCallEntry->auParams[2]);

    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)g_apfnIemThreadedFunctions[pCallEntry->enmFunction]);

#else
# error "port me"
#endif

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
    pReNative->Core.fDebugPcInitialized = false;
    Log4(("fDebugPcInitialized=false todo off=%#x (v2)\n", off));
#endif

    /*
     * Check the status code.
     */
    off = iemNativeEmitCheckCallRetAndPassUp(pReNative, off, pCallEntry->idxInstr);

    return off;
}
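
/* To recap the above: all pending writes and shadows are flushed, pVCpu and
 * up to three auParams are marshalled per the host calling convention, the
 * worker from g_apfnIemThreadedFunctions[pCallEntry->enmFunction] is called,
 * and the returned status (plus rcPassUp) is routed through
 * iemNativeEmitCheckCallRetAndPassUp.
 */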

#ifdef VBOX_WITH_STATISTICS

/**
 * Emits code to update the thread call statistics.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitThreadCallStats(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
{
    /*
     * Update threaded function stats.
     */
    uint32_t const offVCpu = RT_UOFFSETOF_DYN(VMCPUCC, iem.s.acThreadedFuncStats[pCallEntry->enmFunction]);
    AssertCompile(sizeof(pReNative->pVCpu->iem.s.acThreadedFuncStats[pCallEntry->enmFunction]) == sizeof(uint32_t));
# if defined(RT_ARCH_ARM64)
    uint8_t const idxTmp1 = iemNativeRegAllocTmp(pReNative, &off);
    uint8_t const idxTmp2 = iemNativeRegAllocTmp(pReNative, &off);
    off = iemNativeEmitIncU32CounterInVCpu(pReNative, off, idxTmp1, idxTmp2, offVCpu);
    iemNativeRegFreeTmp(pReNative, idxTmp1);
    iemNativeRegFreeTmp(pReNative, idxTmp2);
# else
    off = iemNativeEmitIncU32CounterInVCpu(pReNative, off, UINT8_MAX, UINT8_MAX, offVCpu);
# endif
    return off;
}


/**
 * Emits code to update the TB exit reason statistics.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitNativeTbExitStats(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t const offVCpu)
{
    uint8_t const idxStatsTmp1 = iemNativeRegAllocTmp(pReNative, &off);
    uint8_t const idxStatsTmp2 = iemNativeRegAllocTmp(pReNative, &off);
    off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, idxStatsTmp1, idxStatsTmp2, offVCpu);
    iemNativeRegFreeTmp(pReNative, idxStatsTmp1);
    iemNativeRegFreeTmp(pReNative, idxStatsTmp2);

    return off;
}

#endif /* VBOX_WITH_STATISTICS */

/**
 * Worker for iemNativeEmitViaLookupDoOne and iemNativeRecompileAttachExecMemChunkCtx.
 */
static uint32_t
iemNativeEmitCoreViaLookupDoOne(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offReturnBreak, uintptr_t pfnHelper)
{
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
    off = iemNativeEmitCallImm(pReNative, off, pfnHelper);

    /* Jump to ReturnBreak if the return register is NULL. */
    off = iemNativeEmitTestIfGprIsZeroAndJmpToFixed(pReNative, off, IEMNATIVE_CALL_RET_GREG,
                                                    true /*f64Bit*/, offReturnBreak);

    /* Okay, continue executing the next TB. */
    off = iemNativeEmitJmpViaGpr(pReNative, off, IEMNATIVE_CALL_RET_GREG);
    return off;
}

#ifndef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE

/**
 * Worker for iemNativeEmitReturnBreakViaLookup.
 */
static uint32_t iemNativeEmitViaLookupDoOne(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offReturnBreak,
                                            IEMNATIVELABELTYPE enmLabel, uintptr_t pfnHelper)
{
    uint32_t const idxLabel = iemNativeLabelFind(pReNative, enmLabel);
    if (idxLabel != UINT32_MAX)
    {
        iemNativeLabelDefine(pReNative, idxLabel, off);
        off = iemNativeEmitCoreViaLookupDoOne(pReNative, off, offReturnBreak, pfnHelper);
    }
    return off;
}


/**
 * Emits the code at the ReturnBreakViaLookup, ReturnBreakViaLookupWithIrq,
 * ReturnBreakViaLookupWithTlb and ReturnBreakViaLookupWithTlbAndIrq labels
 * (returns VINF_IEM_REEXEC_FINISH_WITH_FLAGS or jumps to the next TB).
 */
static uint32_t iemNativeEmitReturnBreakViaLookup(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnBreakLabel)
{
    uint32_t const offReturnBreak = pReNative->paLabels[idxReturnBreakLabel].off;
    Assert(offReturnBreak < off);

    /*
     * The lookup table index is in IEMNATIVE_CALL_ARG1_GREG for all.
     * The GCPhysPc is in IEMNATIVE_CALL_ARG2_GREG for ReturnBreakViaLookupWithPc.
     */
    off = iemNativeEmitViaLookupDoOne(pReNative, off, offReturnBreak, kIemNativeLabelType_ReturnBreakViaLookup,
                                      (uintptr_t)iemNativeHlpReturnBreakViaLookup<false /*a_fWithIrqCheck*/>);
    off = iemNativeEmitViaLookupDoOne(pReNative, off, offReturnBreak, kIemNativeLabelType_ReturnBreakViaLookupWithIrq,
                                      (uintptr_t)iemNativeHlpReturnBreakViaLookup<true /*a_fWithIrqCheck*/>);
    off = iemNativeEmitViaLookupDoOne(pReNative, off, offReturnBreak, kIemNativeLabelType_ReturnBreakViaLookupWithTlb,
                                      (uintptr_t)iemNativeHlpReturnBreakViaLookupWithTlb<false /*a_fWithIrqCheck*/>);
    off = iemNativeEmitViaLookupDoOne(pReNative, off, offReturnBreak, kIemNativeLabelType_ReturnBreakViaLookupWithTlbAndIrq,
                                      (uintptr_t)iemNativeHlpReturnBreakViaLookupWithTlb<true /*a_fWithIrqCheck*/>);
    return off;
}

#endif /* !IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE */

/**
 * Emits the code at the ReturnWithFlags label (returns VINF_IEM_REEXEC_FINISH_WITH_FLAGS).
 */
static uint32_t iemNativeEmitCoreReturnWithFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /* set the return status */
    return iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_RET_GREG, VINF_IEM_REEXEC_FINISH_WITH_FLAGS);
}


#ifndef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
/**
 * Emits the code at the ReturnWithFlags label (returns VINF_IEM_REEXEC_FINISH_WITH_FLAGS).
 */
static uint32_t iemNativeEmitReturnWithFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
{
    uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_ReturnWithFlags);
    if (idxLabel != UINT32_MAX)
    {
        iemNativeLabelDefine(pReNative, idxLabel, off);
        /* set the return status */
        off = iemNativeEmitCoreReturnWithFlags(pReNative, off);
        /* jump back to the return sequence. */
        off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
    }
    return off;
}
#endif


/**
 * Emits the code at the ReturnBreakFF label (returns VINF_IEM_REEXEC_BREAK_FF).
 */
static uint32_t iemNativeEmitCoreReturnBreakFF(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /* set the return status */
    return iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_RET_GREG, VINF_IEM_REEXEC_BREAK_FF);
}


#ifndef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
/**
 * Emits the code at the ReturnBreakFF label (returns VINF_IEM_REEXEC_BREAK_FF).
 */
static uint32_t iemNativeEmitReturnBreakFF(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
{
    uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_ReturnBreakFF);
    if (idxLabel != UINT32_MAX)
    {
        iemNativeLabelDefine(pReNative, idxLabel, off);
        /* set the return status */
        off = iemNativeEmitCoreReturnBreakFF(pReNative, off);
        /* jump back to the return sequence. */
        off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
    }
    return off;
}
#endif


/**
 * Emits the code at the ReturnBreak label (returns VINF_IEM_REEXEC_BREAK).
 */
static uint32_t iemNativeEmitCoreReturnBreak(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /* set the return status */
    return iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_RET_GREG, VINF_IEM_REEXEC_BREAK);
}


#ifndef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
/**
 * Emits the code at the ReturnBreak label (returns VINF_IEM_REEXEC_BREAK).
 */
static uint32_t iemNativeEmitReturnBreak(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
{
    uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_ReturnBreak);
    if (idxLabel != UINT32_MAX)
    {
        iemNativeLabelDefine(pReNative, idxLabel, off);
        /* set the return status */
        off = iemNativeEmitCoreReturnBreak(pReNative, off);
        /* jump back to the return sequence. */
        off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
    }
    return off;
}
#endif
6734 |
|
---|
6735 |
|
---|
6736 | /**
|
---|
6737 | * Emits the RC fiddling code for handling non-zero return code or rcPassUp.
|
---|
6738 | */
|
---|
6739 | static uint32_t iemNativeEmitCoreRcFiddling(PIEMRECOMPILERSTATE pReNative, uint32_t off)
|
---|
6740 | {
|
---|
6741 | /*
|
---|
6742 | * Generate the rc + rcPassUp fiddling code.
|
---|
6743 | */
|
---|
6744 | /* iemNativeHlpExecStatusCodeFiddling(PVMCPUCC pVCpu, int rc, uint8_t idxInstr) */
|
---|
6745 | #ifdef RT_ARCH_AMD64
|
---|
6746 | # ifdef RT_OS_WINDOWS
|
---|
6747 | # ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
|
---|
6748 | off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_x8, X86_GREG_xCX); /* cl = instruction number */
|
---|
6749 | # endif
|
---|
6750 | off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xCX, IEMNATIVE_REG_FIXED_PVMCPU);
|
---|
6751 | off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xAX);
|
---|
6752 | # else
|
---|
6753 | off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDI, IEMNATIVE_REG_FIXED_PVMCPU);
|
---|
6754 | off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xSI, X86_GREG_xAX);
|
---|
6755 | # ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
|
---|
6756 | off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xCX); /* cl = instruction number */
|
---|
6757 | # endif
|
---|
6758 | # endif
|
---|
6759 | # ifndef IEMNATIVE_WITH_INSTRUCTION_COUNTING
|
---|
6760 | off = iemNativeEmitLoadGpr8Imm(pReNative, off, X86_GREG_xCX, 0);
|
---|
6761 | # endif
|
---|
6762 |
|
---|
6763 | #else
|
---|
6764 | off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_CALL_RET_GREG);
|
---|
6765 | off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
|
---|
6766 | /* IEMNATIVE_CALL_ARG2_GREG is already set. */
|
---|
6767 | #endif
|
---|
6768 |
|
---|
6769 | off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpExecStatusCodeFiddling);
|
---|
6770 | return off;
|
---|
6771 | }
|
---|
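
/*
 * Reading aid for the argument shuffle in iemNativeEmitCoreRcFiddling above,
 * for the IEMNATIVE_WITH_INSTRUCTION_COUNTING case (this only maps what the
 * code already does, it is not additional emitted code):
 *
 *      helper parameter    win/amd64   sysv/amd64   generic (e.g. arm64)
 *      pVCpu               rcx         rdi          IEMNATIVE_CALL_ARG0_GREG
 *      rc                  rdx         rsi          IEMNATIVE_CALL_ARG1_GREG
 *      idxInstr            r8          rdx          IEMNATIVE_CALL_ARG2_GREG (already set)
 *
 * The rc value arrives in the return register (xAX / IEMNATIVE_CALL_RET_GREG)
 * and, per the comments above, the instruction number travels in cl on AMD64.
 */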


#ifndef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
/**
 * Emits the RC fiddling code for handling non-zero return code or rcPassUp.
 */
static uint32_t iemNativeEmitRcFiddling(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
{
    /*
     * Generate the rc + rcPassUp fiddling code if needed.
     */
    uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
    if (idxLabel != UINT32_MAX)
    {
        iemNativeLabelDefine(pReNative, idxLabel, off);
        off = iemNativeEmitCoreRcFiddling(pReNative, off);
        off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
    }
    return off;
}
#endif


/**
 * Emits a standard epilog.
 */
static uint32_t iemNativeEmitCoreEpilog(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    pReNative->Core.bmHstRegs |= RT_BIT_32(IEMNATIVE_CALL_RET_GREG); /* HACK: For IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK (return register is already set to status code). */

    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, X86_EFL_STATUS_BITS);

    /* HACK: For IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK (return register is already set to status code). */
    pReNative->Core.bmHstRegs &= ~RT_BIT_32(IEMNATIVE_CALL_RET_GREG);

    /*
     * Restore registers and return.
     */
#ifdef RT_ARCH_AMD64
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);

    /* Reposition rsp at the r15 restore point. */
    pbCodeBuf[off++] = X86_OP_REX_W;
    pbCodeBuf[off++] = 0x8d;                    /* lea rsp, [rbp - (gcc ? 5 : 7) * 8] */
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, X86_GREG_xSP, X86_GREG_xBP);
    pbCodeBuf[off++] = (uint8_t)IEMNATIVE_FP_OFF_LAST_PUSH;

    /* Pop non-volatile registers and return */
    pbCodeBuf[off++] = X86_OP_REX_B;            /* pop r15 */
    pbCodeBuf[off++] = 0x58 + X86_GREG_x15 - 8;
    pbCodeBuf[off++] = X86_OP_REX_B;            /* pop r14 */
    pbCodeBuf[off++] = 0x58 + X86_GREG_x14 - 8;
    pbCodeBuf[off++] = X86_OP_REX_B;            /* pop r13 */
    pbCodeBuf[off++] = 0x58 + X86_GREG_x13 - 8;
    pbCodeBuf[off++] = X86_OP_REX_B;            /* pop r12 */
    pbCodeBuf[off++] = 0x58 + X86_GREG_x12 - 8;
# ifdef RT_OS_WINDOWS
    pbCodeBuf[off++] = 0x58 + X86_GREG_xDI;     /* pop rdi */
    pbCodeBuf[off++] = 0x58 + X86_GREG_xSI;     /* pop rsi */
# endif
    pbCodeBuf[off++] = 0x58 + X86_GREG_xBX;     /* pop rbx */
    pbCodeBuf[off++] = 0xc9;                    /* leave */
    pbCodeBuf[off++] = 0xc3;                    /* ret */
    pbCodeBuf[off++] = 0xcc;                    /* int3 poison */

#elif RT_ARCH_ARM64
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);

    /* ldp x19, x20, [sp #IEMNATIVE_FRAME_VAR_SIZE]! ; Unallocate the variable space and restore x19+x20. */
    AssertCompile(IEMNATIVE_FRAME_VAR_SIZE < 64*8);
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_PreIndex,
                                                 ARMV8_A64_REG_X19, ARMV8_A64_REG_X20, ARMV8_A64_REG_SP,
                                                 IEMNATIVE_FRAME_VAR_SIZE / 8);
    /* Restore x21 thru x28 + BP and LR (ret address) (SP remains unchanged in the kSigned variant). */
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                 ARMV8_A64_REG_X21, ARMV8_A64_REG_X22, ARMV8_A64_REG_SP, 2);
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                 ARMV8_A64_REG_X23, ARMV8_A64_REG_X24, ARMV8_A64_REG_SP, 4);
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                 ARMV8_A64_REG_X25, ARMV8_A64_REG_X26, ARMV8_A64_REG_SP, 6);
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                 ARMV8_A64_REG_X27, ARMV8_A64_REG_X28, ARMV8_A64_REG_SP, 8);
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                 ARMV8_A64_REG_BP, ARMV8_A64_REG_LR, ARMV8_A64_REG_SP, 10);
    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);

    /* add sp, sp, IEMNATIVE_FRAME_SAVE_REG_SIZE ; */
    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE < 4096);
    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, ARMV8_A64_REG_SP, ARMV8_A64_REG_SP,
                                                     IEMNATIVE_FRAME_SAVE_REG_SIZE);

    /* retab / ret */
# ifdef RT_OS_DARWIN /** @todo See todo on pacibsp in the prolog. */
    if (1)
        pu32CodeBuf[off++] = ARMV8_A64_INSTR_RETAB;
    else
# endif
        pu32CodeBuf[off++] = ARMV8_A64_INSTR_RET;

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    /* HACK: For IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK. */
    pReNative->Core.bmHstRegs &= ~RT_BIT_32(IEMNATIVE_CALL_RET_GREG);

    return off;
}


#ifndef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
/**
 * Emits a standard epilog.
 */
static uint32_t iemNativeEmitEpilog(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t *pidxReturnLabel)
{
    /*
     * Define the label for the common return point.
     */
    *pidxReturnLabel = UINT32_MAX;
    uint32_t const idxReturn = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Return, off);
    *pidxReturnLabel = idxReturn;

    /*
     * Emit the code.
     */
    return iemNativeEmitCoreEpilog(pReNative, off);
}
#endif


#ifndef IEMNATIVE_WITH_RECOMPILER_PROLOGUE_SINGLETON
/**
 * Emits a standard prolog.
 */
static uint32_t iemNativeEmitProlog(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
#ifdef RT_ARCH_AMD64
    /*
     * Set up a regular xBP stack frame, pushing all non-volatile GPRs,
     * reserving 64 bytes for stack variables plus 4 non-register argument
     * slots.  Fixed register assignment: xBX = pVCpu.
     *
     * Since we always do the same register spilling, we can use the same
     * unwind description for all the code.
     */
    uint8_t *const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
    pbCodeBuf[off++] = 0x50 + X86_GREG_xBP;     /* push rbp */
    pbCodeBuf[off++] = X86_OP_REX_W;            /* mov rbp, rsp */
    pbCodeBuf[off++] = 0x8b;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xBP, X86_GREG_xSP);
    pbCodeBuf[off++] = 0x50 + X86_GREG_xBX;     /* push rbx */
    AssertCompile(IEMNATIVE_REG_FIXED_PVMCPU == X86_GREG_xBX);
# ifdef RT_OS_WINDOWS
    pbCodeBuf[off++] = X86_OP_REX_W;            /* mov rbx, rcx ; RBX = pVCpu */
    pbCodeBuf[off++] = 0x8b;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xBX, X86_GREG_xCX);
    pbCodeBuf[off++] = 0x50 + X86_GREG_xSI;     /* push rsi */
    pbCodeBuf[off++] = 0x50 + X86_GREG_xDI;     /* push rdi */
# else
    pbCodeBuf[off++] = X86_OP_REX_W;            /* mov rbx, rdi ; RBX = pVCpu */
    pbCodeBuf[off++] = 0x8b;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xBX, X86_GREG_xDI);
# endif
    pbCodeBuf[off++] = X86_OP_REX_B;            /* push r12 */
    pbCodeBuf[off++] = 0x50 + X86_GREG_x12 - 8;
    pbCodeBuf[off++] = X86_OP_REX_B;            /* push r13 */
    pbCodeBuf[off++] = 0x50 + X86_GREG_x13 - 8;
    pbCodeBuf[off++] = X86_OP_REX_B;            /* push r14 */
    pbCodeBuf[off++] = 0x50 + X86_GREG_x14 - 8;
    pbCodeBuf[off++] = X86_OP_REX_B;            /* push r15 */
    pbCodeBuf[off++] = 0x50 + X86_GREG_x15 - 8;

# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
    /* Save the frame pointer. */
    off = iemNativeEmitStoreGprToVCpuU64Ex(pbCodeBuf, off, X86_GREG_xBP, RT_UOFFSETOF(VMCPUCC, iem.s.pvTbFramePointerR3));
# endif

    off = iemNativeEmitSubGprImm(pReNative, off,            /* sub rsp, byte 28h */
                                 X86_GREG_xSP,
                                   IEMNATIVE_FRAME_ALIGN_SIZE
                                 + IEMNATIVE_FRAME_VAR_SIZE
                                 + IEMNATIVE_FRAME_STACK_ARG_COUNT  * 8
                                 + IEMNATIVE_FRAME_SHADOW_ARG_COUNT * 8);
    AssertCompile(!(IEMNATIVE_FRAME_VAR_SIZE & 0xf));
    AssertCompile(!(IEMNATIVE_FRAME_STACK_ARG_COUNT & 0x1));
    AssertCompile(!(IEMNATIVE_FRAME_SHADOW_ARG_COUNT & 0x1));

#elif RT_ARCH_ARM64
    /*
     * We set up a stack frame exactly like on x86, only we have to push the
     * return address ourselves here.  We save all non-volatile registers.
     */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 16);

# ifdef RT_OS_DARWIN /** @todo This seems to be a requirement of libunwind for JIT FDEs.  Investigate further as I've been
                      *        unable to figure out where the BRK following AUTHB*+XPACB* stuff comes from in libunwind.
                      *        It's definitely the dwarf stepping code, but until found it's very tedious to figure out
                      *        whether it's in any way conditional, so just emitting these instructions now and hoping for
                      *        the best... */
    /* pacibsp */
    pu32CodeBuf[off++] = ARMV8_A64_INSTR_PACIBSP;
# endif

    /* stp x19, x20, [sp, #-IEMNATIVE_FRAME_SAVE_REG_SIZE] ; Allocate space for saving registers and place x19+x20 at the bottom. */
    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE < 64*8);
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_PreIndex,
                                                 ARMV8_A64_REG_X19, ARMV8_A64_REG_X20, ARMV8_A64_REG_SP,
                                                 -IEMNATIVE_FRAME_SAVE_REG_SIZE / 8);
    /* Save x21 thru x28 (SP remains unchanged in the kSigned variant). */
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                 ARMV8_A64_REG_X21, ARMV8_A64_REG_X22, ARMV8_A64_REG_SP, 2);
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                 ARMV8_A64_REG_X23, ARMV8_A64_REG_X24, ARMV8_A64_REG_SP, 4);
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                 ARMV8_A64_REG_X25, ARMV8_A64_REG_X26, ARMV8_A64_REG_SP, 6);
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                 ARMV8_A64_REG_X27, ARMV8_A64_REG_X28, ARMV8_A64_REG_SP, 8);
    /* Save the BP and LR (ret address) registers at the top of the frame. */
    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                 ARMV8_A64_REG_BP, ARMV8_A64_REG_LR, ARMV8_A64_REG_SP, 10);
    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);
    /* add bp, sp, IEMNATIVE_FRAME_SAVE_REG_SIZE - 16 ; Set BP to point to the old BP stack address. */
    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, ARMV8_A64_REG_BP,
                                                     ARMV8_A64_REG_SP, IEMNATIVE_FRAME_SAVE_REG_SIZE - 16);

    /* sub sp, sp, IEMNATIVE_FRAME_VAR_SIZE ; Allocate the variable area from SP. */
    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_SP, ARMV8_A64_REG_SP, IEMNATIVE_FRAME_VAR_SIZE);

    /* mov r28, r0 */
    off = iemNativeEmitLoadGprFromGprEx(pu32CodeBuf, off, IEMNATIVE_REG_FIXED_PVMCPU, IEMNATIVE_CALL_ARG0_GREG);
    /* mov r27, r1 */
    off = iemNativeEmitLoadGprFromGprEx(pu32CodeBuf, off, IEMNATIVE_REG_FIXED_PCPUMCTX, IEMNATIVE_CALL_ARG1_GREG);

# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
    /* Save the frame pointer. */
    off = iemNativeEmitStoreGprToVCpuU64Ex(pu32CodeBuf, off, ARMV8_A64_REG_BP, RT_UOFFSETOF(VMCPUCC, iem.s.pvTbFramePointerR3),
                                           ARMV8_A64_REG_X2);
# endif

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}
#endif
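
/*
 * Note on the entry convention implied by the prolog above: the recompiled TB
 * is entered like a C function whose first argument is the VMCPU pointer
 * (rcx on win/amd64, rdi on sysv/amd64, x0 on arm64), which is parked in
 * IEMNATIVE_REG_FIXED_PVMCPU for the lifetime of the TB; on ARM64 the second
 * argument is additionally parked in IEMNATIVE_REG_FIXED_PCPUMCTX.
 */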


/*********************************************************************************************************************************
*   Emitters for IEM_MC_ARG_XXX, IEM_MC_LOCAL, IEM_MC_LOCAL_CONST, ++                                                            *
*********************************************************************************************************************************/

/**
 * Internal work that allocates a variable with kind set to
 * kIemNativeVarKind_Invalid and no current stack allocation.
 *
 * The kind will either be set by the caller or later when the variable is first
 * assigned a value.
 *
 * @returns Unpacked index.
 * @internal
 */
static uint8_t iemNativeVarAllocInt(PIEMRECOMPILERSTATE pReNative, uint8_t cbType)
{
    Assert(cbType > 0 && cbType <= 64);
    unsigned const idxVar = ASMBitFirstSetU32(~pReNative->Core.bmVars) - 1;
    AssertStmt(idxVar < RT_ELEMENTS(pReNative->Core.aVars), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_EXHAUSTED));
    pReNative->Core.bmVars |= RT_BIT_32(idxVar);
    pReNative->Core.aVars[idxVar].enmKind        = kIemNativeVarKind_Invalid;
    pReNative->Core.aVars[idxVar].cbVar          = cbType;
    pReNative->Core.aVars[idxVar].idxStackSlot   = UINT8_MAX;
    pReNative->Core.aVars[idxVar].idxReg         = UINT8_MAX;
    pReNative->Core.aVars[idxVar].uArgNo         = UINT8_MAX;
    pReNative->Core.aVars[idxVar].idxReferrerVar = UINT8_MAX;
    pReNative->Core.aVars[idxVar].enmGstReg      = kIemNativeGstReg_End;
    pReNative->Core.aVars[idxVar].fRegAcquired   = false;
    pReNative->Core.aVars[idxVar].u.uValue       = 0;
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    pReNative->Core.aVars[idxVar].fSimdReg       = false;
#endif
    return idxVar;
}


/**
 * Internal work that allocates an argument variable w/o setting enmKind.
 *
 * @returns Unpacked index.
 * @internal
 */
static uint8_t iemNativeArgAllocInt(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t cbType)
{
    iArgNo += iemNativeArgGetHiddenArgCount(pReNative);
    AssertStmt(iArgNo < RT_ELEMENTS(pReNative->Core.aidxArgVars), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_1));
    AssertStmt(pReNative->Core.aidxArgVars[iArgNo] == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_DUP_ARG_NO));

    uint8_t const idxVar = iemNativeVarAllocInt(pReNative, cbType);
    pReNative->Core.aidxArgVars[iArgNo] = idxVar; /* (unpacked) */
    pReNative->Core.aVars[idxVar].uArgNo = iArgNo;
    return idxVar;
}


/**
 * Gets the stack slot for a stack variable, allocating one if necessary.
 *
 * Calling this function implies that the stack slot will contain a valid
 * variable value.  The caller deals with any register currently assigned to
 * the variable, typically by spilling it into the stack slot.
 *
 * @returns The stack slot number.
 * @param   pReNative   The recompiler state.
 * @param   idxVar      The variable.
 * @throws  VERR_IEM_VAR_OUT_OF_STACK_SLOTS
 */
DECL_HIDDEN_THROW(uint8_t) iemNativeVarGetStackSlot(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
    Assert(pVar->enmKind == kIemNativeVarKind_Stack);

    /* Already got a slot? */
    uint8_t const idxStackSlot = pVar->idxStackSlot;
    if (idxStackSlot != UINT8_MAX)
    {
        Assert(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS);
        return idxStackSlot;
    }

    /*
     * A single slot is easy to allocate.
     * Allocate them from the top end, closest to BP, to reduce the displacement.
     */
    if (pVar->cbVar <= sizeof(uint64_t))
    {
        unsigned const iSlot = ASMBitLastSetU32(~pReNative->Core.bmStack) - 1;
        AssertStmt(iSlot < IEMNATIVE_FRAME_VAR_SLOTS, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_OUT_OF_STACK_SLOTS));
        pReNative->Core.bmStack |= RT_BIT_32(iSlot);
        pVar->idxStackSlot       = (uint8_t)iSlot;
        Log11(("iemNativeVarGetStackSlot: idxVar=%#x iSlot=%#x\n", idxVar, iSlot));
        return (uint8_t)iSlot;
    }

    /*
     * We need more than one stack slot.
     *
     * cbVar -> fBitAlignMask: 16 -> 1; 32 -> 3; 64 -> 7;
     */
    AssertCompile(RT_IS_POWER_OF_TWO(IEMNATIVE_FRAME_VAR_SLOTS)); /* If not we have to add an overflow check. */
    Assert(pVar->cbVar <= 64);
    uint32_t const fBitAlignMask = RT_BIT_32(ASMBitLastSetU32(pVar->cbVar) - 4) - 1;
    uint32_t       fBitAllocMask = RT_BIT_32((pVar->cbVar + 7) >> 3) - 1;
    uint32_t       bmStack       = pReNative->Core.bmStack;
    while (bmStack != UINT32_MAX)
    {
        unsigned iSlot = ASMBitLastSetU32(~bmStack);
        AssertStmt(iSlot, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_OUT_OF_STACK_SLOTS));
        iSlot = (iSlot - 1) & ~fBitAlignMask;
        if ((bmStack & ~(fBitAllocMask << iSlot)) == bmStack)
        {
            pReNative->Core.bmStack |= (fBitAllocMask << iSlot);
            pVar->idxStackSlot       = (uint8_t)iSlot;
            Log11(("iemNativeVarGetStackSlot: idxVar=%#x iSlot=%#x/%#x (cbVar=%#x)\n",
                   idxVar, iSlot, fBitAllocMask, pVar->cbVar));
            return (uint8_t)iSlot;
        }

        bmStack |= (fBitAllocMask << iSlot);
    }
    AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_OUT_OF_STACK_SLOTS));
}
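
/*
 * Worked example for the multi-slot allocation above (a reading aid, not
 * code): for a 16 byte variable, ASMBitLastSetU32(16) is 5, so
 * fBitAlignMask = RT_BIT_32(5 - 4) - 1 = 1 (candidate slot index must be
 * even) and fBitAllocMask = RT_BIT_32((16 + 7) >> 3) - 1 = 3 (two adjacent
 * slots).  The loop scans downwards from the highest free slot, aligns each
 * candidate, and only claims it if both slot bits are still clear in bmStack.
 */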


/**
 * Changes the variable to a stack variable.
 *
 * Currently this is only possible to do the first time the variable is used;
 * switching later can be implemented but hasn't been done.
 *
 * @param   pReNative   The recompiler state.
 * @param   idxVar      The variable.
 * @throws  VERR_IEM_VAR_IPE_2
 */
DECL_HIDDEN_THROW(void) iemNativeVarSetKindToStack(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
    if (pVar->enmKind != kIemNativeVarKind_Stack)
    {
        /* We could in theory transition from immediate to stack as well, but it
           would involve the caller doing work storing the value on the stack.  So,
           till that's required we only allow transition from invalid. */
        AssertStmt(pVar->enmKind == kIemNativeVarKind_Invalid, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
        AssertStmt(pVar->idxReg == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
        pVar->enmKind = kIemNativeVarKind_Stack;

        /* Note! We don't allocate a stack slot here, that's only done when a
                 slot is actually needed to hold a variable value. */
    }
}
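
/*
 * Note on variable kinds: a variable starts out as kIemNativeVarKind_Invalid
 * and the setters above and below only allow a single transition, to Stack,
 * Immediate, VarRef or GstRegRef, made the first time the variable is given
 * a value or used; any other transition trips a VERR_IEM_VAR_IPE_2 assertion.
 */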


/**
 * Sets the variable to a constant (immediate) value.
 *
 * This does not require stack storage as we know the value and can always
 * reload it, unless of course it's referenced.
 *
 * @param   pReNative   The recompiler state.
 * @param   idxVar      The variable.
 * @param   uValue      The immediate value.
 * @throws  VERR_IEM_VAR_OUT_OF_STACK_SLOTS, VERR_IEM_VAR_IPE_2
 */
DECL_HIDDEN_THROW(void) iemNativeVarSetKindToConst(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint64_t uValue)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
    if (pVar->enmKind != kIemNativeVarKind_Immediate)
    {
        /* Only simple transitions for now. */
        AssertStmt(pVar->enmKind == kIemNativeVarKind_Invalid, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
        pVar->enmKind = kIemNativeVarKind_Immediate;
    }
    AssertStmt(pVar->idxReg == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));

    pVar->u.uValue = uValue;
    AssertMsg(   pVar->cbVar >= sizeof(uint64_t)
              || pVar->u.uValue < RT_BIT_64(pVar->cbVar * 8),
              ("idxVar=%d cbVar=%u uValue=%#RX64\n", idxVar, pVar->cbVar, uValue));
}


/**
 * Sets the variable to a reference (pointer) to @a idxOtherVar.
 *
 * This does not require stack storage as we know the value and can always
 * reload it.  Loading is postponed till needed.
 *
 * @param   pReNative   The recompiler state.
 * @param   idxVar      The variable.  Unpacked.
 * @param   idxOtherVar The variable to take the (stack) address of.  Unpacked.
 *
 * @throws  VERR_IEM_VAR_OUT_OF_STACK_SLOTS, VERR_IEM_VAR_IPE_2
 * @internal
 */
static void iemNativeVarSetKindToLocalRef(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint8_t idxOtherVar)
{
    Assert(idxVar < RT_ELEMENTS(pReNative->Core.aVars) && (pReNative->Core.bmVars & RT_BIT_32(idxVar)));
    Assert(idxOtherVar < RT_ELEMENTS(pReNative->Core.aVars) && (pReNative->Core.bmVars & RT_BIT_32(idxOtherVar)));

    if (pReNative->Core.aVars[idxVar].enmKind != kIemNativeVarKind_VarRef)
    {
        /* Only simple transitions for now. */
        AssertStmt(pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Invalid,
                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
        pReNative->Core.aVars[idxVar].enmKind = kIemNativeVarKind_VarRef;
    }
    AssertStmt(pReNative->Core.aVars[idxVar].idxReg == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));

    pReNative->Core.aVars[idxVar].u.idxRefVar = idxOtherVar; /* unpacked */

    /* Update the other variable, ensure it's a stack variable. */
    /** @todo handle variables with const values... that'll go boom now. */
    pReNative->Core.aVars[idxOtherVar].idxReferrerVar = idxVar;
    iemNativeVarSetKindToStack(pReNative, IEMNATIVE_VAR_IDX_PACK(idxOtherVar));
}


/**
 * Sets the variable to a reference (pointer) to a guest register reference.
 *
 * This does not require stack storage as we know the value and can always
 * reload it.  Loading is postponed till needed.
 *
 * @param   pReNative   The recompiler state.
 * @param   idxVar      The variable.
 * @param   enmRegClass The class of guest registers to reference.
 * @param   idxReg      The register within @a enmRegClass to reference.
 *
 * @throws  VERR_IEM_VAR_IPE_2
 */
DECL_HIDDEN_THROW(void) iemNativeVarSetKindToGstRegRef(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar,
                                                       IEMNATIVEGSTREGREF enmRegClass, uint8_t idxReg)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];

    if (pVar->enmKind != kIemNativeVarKind_GstRegRef)
    {
        /* Only simple transitions for now. */
        AssertStmt(pVar->enmKind == kIemNativeVarKind_Invalid, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
        pVar->enmKind = kIemNativeVarKind_GstRegRef;
    }
    AssertStmt(pVar->idxReg == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));

    pVar->u.GstRegRef.enmClass = enmRegClass;
    pVar->u.GstRegRef.idx      = idxReg;
}


DECL_HIDDEN_THROW(uint8_t) iemNativeArgAlloc(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t cbType)
{
    return IEMNATIVE_VAR_IDX_PACK(iemNativeArgAllocInt(pReNative, iArgNo, cbType));
}


DECL_HIDDEN_THROW(uint8_t) iemNativeArgAllocConst(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t cbType, uint64_t uValue)
{
    uint8_t const idxVar = IEMNATIVE_VAR_IDX_PACK(iemNativeArgAllocInt(pReNative, iArgNo, cbType));

    /* Since we're using a generic uint64_t value type, we must truncate it if
       the variable is smaller, otherwise we may end up with a too large value
       when scaling up an imm8 w/ sign-extension.

       This caused trouble with an "add bx, 0xffff" instruction (around f000:ac60
       in the bios, bx=1) when running on arm, because clang expects 16-bit
       register parameters to have bits 16 and up set to zero.  Instead of
       setting x1 = 0xffff we ended up with x1 = 0xffffffffffffff and the wrong
       CF value in the result. */
    switch (cbType)
    {
        case sizeof(uint8_t):  uValue &= UINT64_C(0xff); break;
        case sizeof(uint16_t): uValue &= UINT64_C(0xffff); break;
        case sizeof(uint32_t): uValue &= UINT64_C(0xffffffff); break;
    }
    iemNativeVarSetKindToConst(pReNative, idxVar, uValue);
    return idxVar;
}
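
/*
 * Worked example for the truncation above (a reading aid, not part of the
 * recompiler): the imm8 in the "add bx, 0xffff" case sign-extends to 0xffff
 * for the 16-bit operand, but carried around as a generic uint64_t it grows
 * into an all-ones value.  With cbType = sizeof(uint16_t) the mask leaves
 * uValue = 0xffff, which is the form clang expects for a 16-bit register
 * parameter (bits 63:16 zero).
 */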


DECL_HIDDEN_THROW(uint8_t) iemNativeArgAllocLocalRef(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t idxOtherVar)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxOtherVar);
    idxOtherVar = IEMNATIVE_VAR_IDX_UNPACK(idxOtherVar);
    AssertStmt(   idxOtherVar < RT_ELEMENTS(pReNative->Core.aVars)
               && (pReNative->Core.bmVars & RT_BIT_32(idxOtherVar))
               && pReNative->Core.aVars[idxOtherVar].uArgNo == UINT8_MAX,
               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_1));

    uint8_t const idxArgVar = iemNativeArgAlloc(pReNative, iArgNo, sizeof(uintptr_t));
    iemNativeVarSetKindToLocalRef(pReNative, IEMNATIVE_VAR_IDX_UNPACK(idxArgVar), idxOtherVar);
    return idxArgVar;
}


DECL_HIDDEN_THROW(uint8_t) iemNativeVarAlloc(PIEMRECOMPILERSTATE pReNative, uint8_t cbType)
{
    uint8_t const idxVar = IEMNATIVE_VAR_IDX_PACK(iemNativeVarAllocInt(pReNative, cbType));
    /* Don't set to stack now, leave that to the first use as for instance
       IEM_MC_CALC_RM_EFF_ADDR may produce a const/immediate result (esp. in DOS). */
    return idxVar;
}


DECL_HIDDEN_THROW(uint8_t) iemNativeVarAllocConst(PIEMRECOMPILERSTATE pReNative, uint8_t cbType, uint64_t uValue)
{
    uint8_t const idxVar = IEMNATIVE_VAR_IDX_PACK(iemNativeVarAllocInt(pReNative, cbType));

    /* Since we're using a generic uint64_t value type, we must truncate it if
       the variable is smaller, otherwise we may end up with a too large value
       when scaling up an imm8 w/ sign-extension. */
    switch (cbType)
    {
        case sizeof(uint8_t):  uValue &= UINT64_C(0xff); break;
        case sizeof(uint16_t): uValue &= UINT64_C(0xffff); break;
        case sizeof(uint32_t): uValue &= UINT64_C(0xffffffff); break;
    }
    iemNativeVarSetKindToConst(pReNative, idxVar, uValue);
    return idxVar;
}


DECL_HIDDEN_THROW(uint8_t) iemNativeVarAllocAssign(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t cbType, uint8_t idxVarOther)
{
    uint8_t const idxVar = IEMNATIVE_VAR_IDX_PACK(iemNativeVarAllocInt(pReNative, cbType));
    iemNativeVarSetKindToStack(pReNative, IEMNATIVE_VAR_IDX_PACK(idxVar));

    uint8_t const idxVarOtherReg = iemNativeVarRegisterAcquire(pReNative, idxVarOther, poff, true /*fInitialized*/);
    uint8_t const idxVarReg      = iemNativeVarRegisterAcquire(pReNative, idxVar, poff);

    *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxVarReg, idxVarOtherReg);

    /* Truncate the value to this variable's size. */
    switch (cbType)
    {
        case sizeof(uint8_t):  *poff = iemNativeEmitAndGpr32ByImm(pReNative, *poff, idxVarReg, UINT64_C(0xff)); break;
        case sizeof(uint16_t): *poff = iemNativeEmitAndGpr32ByImm(pReNative, *poff, idxVarReg, UINT64_C(0xffff)); break;
        case sizeof(uint32_t): *poff = iemNativeEmitAndGpr32ByImm(pReNative, *poff, idxVarReg, UINT64_C(0xffffffff)); break;
    }

    iemNativeVarRegisterRelease(pReNative, idxVarOther);
    iemNativeVarRegisterRelease(pReNative, idxVar);
    return idxVar;
}


/**
 * Makes sure variable @a idxVar has a register assigned to it and that it stays
 * fixed till we call iemNativeVarRegisterRelease.
 *
 * @returns The host register number.
 * @param   pReNative       The recompiler state.
 * @param   idxVar          The variable.
 * @param   poff            Pointer to the instruction buffer offset.
 *                          In case a register needs to be freed up or the
 *                          value loaded off the stack.
 * @param   fInitialized    Set if the variable must already have been
 *                          initialized.  Will throw VERR_IEM_VAR_NOT_INITIALIZED
 *                          if this is not the case.
 * @param   idxRegPref      Preferred register number or UINT8_MAX.
 */
DECL_HIDDEN_THROW(uint8_t) iemNativeVarRegisterAcquire(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint32_t *poff,
                                                       bool fInitialized /*= false*/, uint8_t idxRegPref /*= UINT8_MAX*/)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
    Assert(pVar->cbVar <= 8);
    Assert(!pVar->fRegAcquired);

    uint8_t idxReg = pVar->idxReg;
    if (idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
    {
        Assert(   pVar->enmKind > kIemNativeVarKind_Invalid
               && pVar->enmKind < kIemNativeVarKind_End);
        pVar->fRegAcquired = true;
        return idxReg;
    }

    /*
     * If the kind of variable has not yet been set, default to 'stack'.
     */
    Assert(   pVar->enmKind >= kIemNativeVarKind_Invalid
           && pVar->enmKind < kIemNativeVarKind_End);
    if (pVar->enmKind == kIemNativeVarKind_Invalid)
        iemNativeVarSetKindToStack(pReNative, idxVar);

    /*
     * We have to allocate a register for the variable, even if it's a stack one,
     * as we don't know if there are modifications being made to it before it's
     * finalized (todo: analyze and insert hints about that?).
     *
     * If we can, we try to get the correct register for argument variables.  This
     * is assuming that most argument variables are fetched as close as possible
     * to the actual call, so that there aren't any interfering hidden calls
     * (memory accesses, etc) in between.
     *
     * If we cannot or it's a variable, we make sure no argument registers
     * that will be used by this MC block will be allocated here, and we always
     * prefer non-volatile registers to avoid needing to spill stuff for internal
     * calls.
     */
    /** @todo Detect too early argument value fetches and warn about hidden
     *        calls causing less optimal code to be generated in the python script. */

    uint8_t const uArgNo = pVar->uArgNo;
    if (   uArgNo < RT_ELEMENTS(g_aidxIemNativeCallRegs)
        && !(pReNative->Core.bmHstRegs & RT_BIT_32(g_aidxIemNativeCallRegs[uArgNo])))
    {
        idxReg = g_aidxIemNativeCallRegs[uArgNo];

#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
        /* Writeback any dirty shadow registers we are about to unshadow. */
        *poff = iemNativeRegFlushDirtyGuestByHostRegShadow(pReNative, *poff, idxReg);
#endif

        iemNativeRegClearGstRegShadowing(pReNative, idxReg, *poff);
        Log11(("iemNativeVarRegisterAcquire: idxVar=%#x idxReg=%u (matching arg %u)\n", idxVar, idxReg, uArgNo));
    }
    else if (   idxRegPref >= RT_ELEMENTS(pReNative->Core.aHstRegs)
             || (pReNative->Core.bmHstRegs & RT_BIT_32(idxRegPref)))
    {
        /** @todo there must be a better way for this and boot cArgsX? */
        uint32_t const fNotArgsMask = ~g_afIemNativeCallRegs[RT_MIN(pReNative->cArgsX, IEMNATIVE_CALL_ARG_GREG_COUNT)];
        uint32_t const fRegs        = ~pReNative->Core.bmHstRegs
                                    & ~pReNative->Core.bmHstRegsWithGstShadow
                                    & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK)
                                    & fNotArgsMask;
        if (fRegs)
        {
            /* Pick from the top as that both arm64 and amd64 have a block of non-volatile registers there. */
            idxReg = (uint8_t)ASMBitLastSetU32(  fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
                                               ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1;
            Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
            Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
            Log11(("iemNativeVarRegisterAcquire: idxVar=%#x idxReg=%u (uArgNo=%u)\n", idxVar, idxReg, uArgNo));
        }
        else
        {
            idxReg = iemNativeRegAllocFindFree(pReNative, poff, false /*fPreferVolatile*/,
                                               IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK & fNotArgsMask);
            AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_VAR));
            Log11(("iemNativeVarRegisterAcquire: idxVar=%#x idxReg=%u (slow, uArgNo=%u)\n", idxVar, idxReg, uArgNo));
        }
    }
    else
    {
        idxReg = idxRegPref;
        iemNativeRegClearGstRegShadowing(pReNative, idxReg, *poff);
        Log11(("iemNativeVarRegisterAcquire: idxVar=%#x idxReg=%u (preferred)\n", idxVar, idxReg));
    }
    iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Var, idxVar);
    pVar->idxReg = idxReg;

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    pVar->fSimdReg = false;
#endif

    /*
     * Load it off the stack if we've got a stack slot.
     */
    uint8_t const idxStackSlot = pVar->idxStackSlot;
    if (idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS)
    {
        Assert(fInitialized);
        int32_t const offDispBp = iemNativeStackCalcBpDisp(idxStackSlot);
        switch (pVar->cbVar)
        {
            case 1: *poff = iemNativeEmitLoadGprByBpU8( pReNative, *poff, idxReg, offDispBp); break;
            case 2: *poff = iemNativeEmitLoadGprByBpU16(pReNative, *poff, idxReg, offDispBp); break;
            case 3: AssertFailed(); RT_FALL_THRU();
            case 4: *poff = iemNativeEmitLoadGprByBpU32(pReNative, *poff, idxReg, offDispBp); break;
            default: AssertFailed(); RT_FALL_THRU();
            case 8: *poff = iemNativeEmitLoadGprByBp(   pReNative, *poff, idxReg, offDispBp); break;
        }
    }
    else
    {
        Assert(idxStackSlot == UINT8_MAX);
        if (pVar->enmKind != kIemNativeVarKind_Immediate)
            AssertStmt(!fInitialized, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
        else
        {
            /*
             * Convert from immediate to stack/register.  This is currently only
             * required by IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR, IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR
             * and IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR in connection with BT, BTS, BTR, and BTC.
             */
            AssertStmt(fInitialized, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
            Log11(("iemNativeVarRegisterAcquire: idxVar=%#x idxReg=%u uValue=%RX64 converting from immediate to stack\n",
                   idxVar, idxReg, pVar->u.uValue));
            *poff = iemNativeEmitLoadGprImm64(pReNative, *poff, idxReg, pVar->u.uValue);
            pVar->enmKind = kIemNativeVarKind_Stack;
        }
    }

    pVar->fRegAcquired = true;
    return idxReg;
}
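
/*
 * Typical usage of the allocator above, following the pattern in
 * iemNativeVarAllocAssign (an illustrative sketch, not emitted by itself):
 *
 *      uint8_t const idxReg = iemNativeVarRegisterAcquire(pReNative, idxVar, &off, true);
 *      off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxReg, UINT64_C(0xffff));
 *      iemNativeVarRegisterRelease(pReNative, idxVar);
 *
 * The host register stays fixed (fRegAcquired) between acquire and release,
 * so the emitted instructions can refer to idxReg without the allocator
 * repurposing it in the meantime.
 */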


#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/**
 * Makes sure variable @a idxVar has a SIMD register assigned to it and that it
 * stays fixed till we call iemNativeVarRegisterRelease.
 *
 * @returns The host register number.
 * @param   pReNative       The recompiler state.
 * @param   idxVar          The variable.
 * @param   poff            Pointer to the instruction buffer offset.
 *                          In case a register needs to be freed up or the
 *                          value loaded off the stack.
 * @param   fInitialized    Set if the variable must already have been
 *                          initialized.  Will throw VERR_IEM_VAR_NOT_INITIALIZED
 *                          if this is not the case.
 * @param   idxRegPref      Preferred SIMD register number or UINT8_MAX.
 */
DECL_HIDDEN_THROW(uint8_t) iemNativeVarSimdRegisterAcquire(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint32_t *poff,
                                                           bool fInitialized /*= false*/, uint8_t idxRegPref /*= UINT8_MAX*/)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
    Assert(   pVar->cbVar == sizeof(RTUINT128U)
           || pVar->cbVar == sizeof(RTUINT256U));
    Assert(!pVar->fRegAcquired);

    uint8_t idxReg = pVar->idxReg;
    if (idxReg < RT_ELEMENTS(pReNative->Core.aHstSimdRegs))
    {
        Assert(   pVar->enmKind > kIemNativeVarKind_Invalid
               && pVar->enmKind < kIemNativeVarKind_End);
        pVar->fRegAcquired = true;
        return idxReg;
    }

    /*
     * If the kind of variable has not yet been set, default to 'stack'.
     */
    Assert(   pVar->enmKind >= kIemNativeVarKind_Invalid
           && pVar->enmKind < kIemNativeVarKind_End);
    if (pVar->enmKind == kIemNativeVarKind_Invalid)
        iemNativeVarSetKindToStack(pReNative, idxVar);

    /*
     * We have to allocate a register for the variable, even if it's a stack one,
     * as we don't know if there are modifications being made to it before it's
     * finalized (todo: analyze and insert hints about that?).
     *
     * If we can, we try to get the correct register for argument variables.  This
     * is assuming that most argument variables are fetched as close as possible
     * to the actual call, so that there aren't any interfering hidden calls
     * (memory accesses, etc) in between.
     *
     * If we cannot or it's a variable, we make sure no argument registers
     * that will be used by this MC block will be allocated here, and we always
     * prefer non-volatile registers to avoid needing to spill stuff for internal
     * calls.
     */
    /** @todo Detect too early argument value fetches and warn about hidden
     *        calls causing less optimal code to be generated in the python script. */

    uint8_t const uArgNo = pVar->uArgNo;
    Assert(uArgNo == UINT8_MAX); RT_NOREF(uArgNo); /* No SIMD registers as arguments for now. */

    /* SIMD is a bit simpler for now because there is no support for arguments. */
    if (   idxRegPref >= RT_ELEMENTS(pReNative->Core.aHstSimdRegs)
        || (pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxRegPref)))
    {
        uint32_t const fNotArgsMask = UINT32_MAX; //~g_afIemNativeCallRegs[RT_MIN(pReNative->cArgs, IEMNATIVE_CALL_ARG_GREG_COUNT)];
        uint32_t const fRegs        = ~pReNative->Core.bmHstSimdRegs
                                    & ~pReNative->Core.bmHstSimdRegsWithGstShadow
                                    & (~IEMNATIVE_SIMD_REG_FIXED_MASK & IEMNATIVE_HST_SIMD_REG_MASK)
                                    & fNotArgsMask;
        if (fRegs)
        {
            idxReg = (uint8_t)ASMBitLastSetU32(  fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
                                               ? fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs) - 1;
            Assert(pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows == 0);
            Assert(!(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxReg)));
            Log11(("iemNativeVarSimdRegisterAcquire: idxVar=%#x idxReg=%u (uArgNo=%u)\n", idxVar, idxReg, uArgNo));
        }
        else
        {
            idxReg = iemNativeSimdRegAllocFindFree(pReNative, poff, false /*fPreferVolatile*/,
                                                   IEMNATIVE_HST_SIMD_REG_MASK & ~IEMNATIVE_SIMD_REG_FIXED_MASK & fNotArgsMask);
            AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_VAR));
            Log11(("iemNativeVarSimdRegisterAcquire: idxVar=%#x idxReg=%u (slow, uArgNo=%u)\n", idxVar, idxReg, uArgNo));
        }
    }
    else
    {
        idxReg = idxRegPref;
        AssertReleaseFailed(); //iemNativeRegClearGstRegShadowing(pReNative, idxReg, *poff);
        Log11(("iemNativeVarSimdRegisterAcquire: idxVar=%#x idxReg=%u (preferred)\n", idxVar, idxReg));
    }
    iemNativeSimdRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Var, idxVar);

    pVar->fSimdReg = true;
    pVar->idxReg   = idxReg;

    /*
     * Load it off the stack if we've got a stack slot.
     */
    uint8_t const idxStackSlot = pVar->idxStackSlot;
    if (idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS)
    {
        Assert(fInitialized);
        int32_t const offDispBp = iemNativeStackCalcBpDisp(idxStackSlot);
        switch (pVar->cbVar)
        {
            case sizeof(RTUINT128U): *poff = iemNativeEmitLoadVecRegByBpU128(pReNative, *poff, idxReg, offDispBp); break;
            default: AssertFailed(); RT_FALL_THRU();
            case sizeof(RTUINT256U): *poff = iemNativeEmitLoadVecRegByBpU256(pReNative, *poff, idxReg, offDispBp); break;
        }
    }
    else
    {
        Assert(idxStackSlot == UINT8_MAX);
        AssertStmt(!fInitialized, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
    }
    pVar->fRegAcquired = true;
    return idxReg;
}
#endif


/**
 * The value of variable @a idxVar will be written in full to the @a enmGstReg
 * guest register.
 *
 * This function makes sure there is a register for it and sets it to be the
 * current shadow copy of @a enmGstReg.
 *
 * @returns The host register number.
 * @param   pReNative   The recompiler state.
 * @param   idxVar      The variable.
 * @param   enmGstReg   The guest register this variable will be written to
 *                      after this call.
 * @param   poff        Pointer to the instruction buffer offset.
 *                      In case a register needs to be freed up or if the
 *                      variable content needs to be loaded off the stack.
 *
 * @note    We DO NOT expect @a idxVar to be an argument variable, because we
 *          can only be in the commit stage of an instruction when this
 *          function is used.
 */
DECL_HIDDEN_THROW(uint8_t)
iemNativeVarRegisterAcquireForGuestReg(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, IEMNATIVEGSTREG enmGstReg, uint32_t *poff)
{
    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
    Assert(!pVar->fRegAcquired);
    AssertMsgStmt(   pVar->cbVar <= 8
                  && (   pVar->enmKind == kIemNativeVarKind_Immediate
                      || pVar->enmKind == kIemNativeVarKind_Stack),
                  ("idxVar=%#x cbVar=%d enmKind=%d enmGstReg=%s\n", idxVar, pVar->cbVar,
                   pVar->enmKind, g_aGstShadowInfo[enmGstReg].pszName),
                  IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_6));

    /*
     * This shouldn't ever be used for arguments, unless it's in a weird else
     * branch that doesn't do any calling and even then it's questionable.
     *
     * However, in case someone writes crazy wrong MC code and does register
     * updates before making calls, just use the regular register allocator to
     * ensure we get a register suitable for the intended argument number.
     */
    AssertStmt(pVar->uArgNo == UINT8_MAX, iemNativeVarRegisterAcquire(pReNative, idxVar, poff));

    /*
     * If there is already a register for the variable, we transfer/set the
     * guest shadow copy assignment to it.
     */
    uint8_t idxReg = pVar->idxReg;
    if (idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
    {
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
        if (enmGstReg >= kIemNativeGstReg_GprFirst && enmGstReg <= kIemNativeGstReg_GprLast)
        {
# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
            iemNativeDbgInfoAddNativeOffset(pReNative, *poff);
            iemNativeDbgInfoAddGuestRegDirty(pReNative, false /*fSimdReg*/, enmGstReg, idxReg);
# endif
            pReNative->Core.bmGstRegShadowDirty |= RT_BIT_64(enmGstReg);
        }
#endif

        if (pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg))
        {
            uint8_t const idxRegOld = pReNative->Core.aidxGstRegShadows[enmGstReg];
            iemNativeRegTransferGstRegShadowing(pReNative, idxRegOld, idxReg, enmGstReg, *poff);
            Log12(("iemNativeVarRegisterAcquireForGuestReg: Moved %s for guest %s into %s for full write\n",
                   g_apszIemNativeHstRegNames[idxRegOld], g_aGstShadowInfo[enmGstReg].pszName, g_apszIemNativeHstRegNames[idxReg]));
        }
        else
        {
            iemNativeRegMarkAsGstRegShadow(pReNative, idxReg, enmGstReg, *poff);
            Log12(("iemNativeVarRegisterAcquireForGuestReg: Marking %s as copy of guest %s (full write)\n",
                   g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName));
        }
        /** @todo figure this one out. We need some way of making sure the register isn't
         *        modified after this point, just in case we start writing crappy MC code. */
        pVar->enmGstReg    = enmGstReg;
        pVar->fRegAcquired = true;
        return idxReg;
    }
    Assert(pVar->uArgNo == UINT8_MAX);

    /*
     * Because this is supposed to be the commit stage, we just tag along with
     * the temporary register allocator and upgrade it to a variable register.
     */
    idxReg = iemNativeRegAllocTmpForGuestReg(pReNative, poff, enmGstReg, kIemNativeGstRegUse_ForFullWrite);
    Assert(pReNative->Core.aHstRegs[idxReg].enmWhat == kIemNativeWhat_Tmp);
    Assert(pReNative->Core.aHstRegs[idxReg].idxVar  == UINT8_MAX);
    pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Var;
    pReNative->Core.aHstRegs[idxReg].idxVar  = idxVar;
    pVar->idxReg = idxReg;

    /*
     * Now we need to load the register value.
     */
    if (pVar->enmKind == kIemNativeVarKind_Immediate)
        *poff = iemNativeEmitLoadGprImm64(pReNative, *poff, idxReg, pVar->u.uValue);
    else
    {
        uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
        int32_t const offDispBp    = iemNativeStackCalcBpDisp(idxStackSlot);
        switch (pVar->cbVar)
        {
            case sizeof(uint64_t):
                *poff = iemNativeEmitLoadGprByBp(pReNative, *poff, idxReg, offDispBp);
                break;
            case sizeof(uint32_t):
                *poff = iemNativeEmitLoadGprByBpU32(pReNative, *poff, idxReg, offDispBp);
                break;
            case sizeof(uint16_t):
                *poff = iemNativeEmitLoadGprByBpU16(pReNative, *poff, idxReg, offDispBp);
                break;
            case sizeof(uint8_t):
                *poff = iemNativeEmitLoadGprByBpU8(pReNative, *poff, idxReg, offDispBp);
                break;
            default:
                AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_6));
        }
    }

    pVar->fRegAcquired = true;
    return idxReg;
}
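
/*
 * The save helper below pairs with iemNativeVarRestoreVolatileRegsPostHlpCall()
 * roughly like this (an illustrative sketch; pfnHelper and fRegsToKeep are
 * placeholders, not real symbols):
 *
 *      off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fRegsToKeep);
 *      off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)pfnHelper);
 *      off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fRegsToKeep);
 *
 * Variables living in volatile host registers are spilled to their stack
 * slots (or to temporary slots) before the call and reloaded afterwards.
 */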
7772 |
|
---|
7773 |
|
---|
7774 | /**
|
---|
7775 | * Emit code to save volatile registers prior to a call to a helper (TLB miss).
|
---|
7776 | *
|
---|
7777 | * This is used together with iemNativeVarRestoreVolatileRegsPostHlpCall() and
|
---|
7778 | * optionally iemNativeRegRestoreGuestShadowsInVolatileRegs() to bypass the
|
---|
7779 | * requirement of flushing anything in volatile host registers when making a
|
---|
7780 | * call.
|
---|
7781 | *
|
---|
7782 | * @returns New @a off value.
|
---|
7783 | * @param pReNative The recompiler state.
|
---|
7784 | * @param off The code buffer position.
|
---|
7785 | * @param fHstRegsNotToSave Set of registers not to save & restore.
|
---|
7786 | */
|
---|
DECL_HIDDEN_THROW(uint32_t)
iemNativeVarSaveVolatileRegsPreHlpCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fHstRegsNotToSave)
{
    uint32_t fHstRegs = pReNative->Core.bmHstRegs & IEMNATIVE_CALL_VOLATILE_NOTMP_GREG_MASK & ~fHstRegsNotToSave;
    if (fHstRegs)
    {
        do
        {
            unsigned int const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
            fHstRegs &= ~RT_BIT_32(idxHstReg);

            if (pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Var)
            {
                uint8_t const idxVar = pReNative->Core.aHstRegs[idxHstReg].idxVar;
                IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
                AssertStmt(   IEMNATIVE_VAR_IDX_UNPACK(idxVar) < RT_ELEMENTS(pReNative->Core.aVars)
                           && (pReNative->Core.bmVars & RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)))
                           && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg,
                           IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_12));
                switch (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].enmKind)
                {
                    case kIemNativeVarKind_Stack:
                    {
                        /* Temporarily spill the variable register. */
                        uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
                        Log12(("iemNativeVarSaveVolatileRegsPreHlpCall: spilling idxVar=%#x/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
                               idxVar, idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
                        off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxHstReg);
                        continue;
                    }

                    case kIemNativeVarKind_Immediate:
                    case kIemNativeVarKind_VarRef:
                    case kIemNativeVarKind_GstRegRef:
                        /* It is weird to have any of these loaded at this point. */
                        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_13));
                        continue;

                    case kIemNativeVarKind_End:
                    case kIemNativeVarKind_Invalid:
                        break;
                }
                AssertFailed();
            }
            else
            {
                /*
                 * Allocate a temporary stack slot and spill the register to it.
                 */
                unsigned const idxStackSlot = ASMBitLastSetU32(~pReNative->Core.bmStack) - 1;
                AssertStmt(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS,
                           IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_OUT_OF_STACK_SLOTS));
                pReNative->Core.bmStack |= RT_BIT_32(idxStackSlot);
                pReNative->Core.aHstRegs[idxHstReg].idxStackSlot = (uint8_t)idxStackSlot;
                Log12(("iemNativeVarSaveVolatileRegsPreHlpCall: spilling idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
                       idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
                off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxHstReg);
            }
        } while (fHstRegs);
    }
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR

    /*
     * Guest register shadows are currently flushed to CPUMCTX and thus don't
     * need a stack slot allocated, which would be more difficult anyway since
     * they span multiple stack slots and come in different sizes (besides, we
     * only have a limited number of slots at the moment).
     *
     * However, the shadows do need to be flushed out, as the callee might
     * corrupt the guest SIMD registers.  This asserts that the registers were
     * written back earlier and are not in the dirty state.
     */
    iemNativeSimdRegFlushGuestShadowsByHostMask(pReNative, IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK);

    fHstRegs = pReNative->Core.bmHstSimdRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK;
    if (fHstRegs)
    {
        do
        {
            unsigned int const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
            fHstRegs &= ~RT_BIT_32(idxHstReg);

            /* Fixed reserved and temporary registers don't need saving. */
            if (   pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_FixedReserved
                || pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_FixedTmp)
                continue;

            Assert(pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_Var);

            uint8_t const idxVar = pReNative->Core.aHstSimdRegs[idxHstReg].idxVar;
            IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
            AssertStmt(   IEMNATIVE_VAR_IDX_UNPACK(idxVar) < RT_ELEMENTS(pReNative->Core.aVars)
                       && (pReNative->Core.bmVars & RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)))
                       && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg
                       && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg
                       && (   pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar == sizeof(RTUINT128U)
                           || pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar == sizeof(RTUINT256U)),
                       IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_12));
            switch (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].enmKind)
            {
                case kIemNativeVarKind_Stack:
                {
                    /* Temporarily spill the variable register. */
                    uint8_t const cbVar        = pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar;
                    uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
                    Log12(("iemNativeVarSaveVolatileRegsPreHlpCall: spilling idxVar=%#x/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
                           idxVar, idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
                    if (cbVar == sizeof(RTUINT128U))
                        off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxHstReg);
                    else
                        off = iemNativeEmitStoreVecRegByBpU256(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxHstReg);
                    continue;
                }

                case kIemNativeVarKind_Immediate:
                case kIemNativeVarKind_VarRef:
                case kIemNativeVarKind_GstRegRef:
                    /* It is weird to have any of these loaded at this point. */
                    AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_13));
                    continue;

                case kIemNativeVarKind_End:
                case kIemNativeVarKind_Invalid:
                    break;
            }
            AssertFailed();
        } while (fHstRegs);
    }
#endif
    return off;
}
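
/*
 * Illustrative usage of the save/restore pair (a sketch only;
 * iemNativeHlpSomeTlbMissHelper is a made-up stand-in for whichever helper is
 * being called):
 *
 *      off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
 *      off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpSomeTlbMissHelper);
 *      off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
 */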


/**
 * Emit code to restore volatile registers after a call to a helper.
 *
 * @returns New @a off value.
 * @param   pReNative           The recompiler state.
 * @param   off                 The code buffer position.
 * @param   fHstRegsNotToSave   Set of registers not to save & restore.
 * @see     iemNativeVarSaveVolatileRegsPreHlpCall(),
 *          iemNativeRegRestoreGuestShadowsInVolatileRegs()
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeVarRestoreVolatileRegsPostHlpCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fHstRegsNotToSave)
{
    uint32_t fHstRegs = pReNative->Core.bmHstRegs & IEMNATIVE_CALL_VOLATILE_NOTMP_GREG_MASK & ~fHstRegsNotToSave;
    if (fHstRegs)
    {
        do
        {
            unsigned int const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
            fHstRegs &= ~RT_BIT_32(idxHstReg);

            if (pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Var)
            {
                uint8_t const idxVar = pReNative->Core.aHstRegs[idxHstReg].idxVar;
                IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
                AssertStmt(   IEMNATIVE_VAR_IDX_UNPACK(idxVar) < RT_ELEMENTS(pReNative->Core.aVars)
                           && (pReNative->Core.bmVars & RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)))
                           && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg,
                           IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_12));
                switch (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].enmKind)
                {
                    case kIemNativeVarKind_Stack:
                    {
                        /* Unspill the variable register. */
                        uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
                        Log12(("iemNativeVarRestoreVolatileRegsPostHlpCall: unspilling idxVar=%#x/idxReg=%d (slot %#x bp+%d, off=%#x)\n",
                               idxVar, idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
                        off = iemNativeEmitLoadGprByBp(pReNative, off, idxHstReg, iemNativeStackCalcBpDisp(idxStackSlot));
                        continue;
                    }

                    case kIemNativeVarKind_Immediate:
                    case kIemNativeVarKind_VarRef:
                    case kIemNativeVarKind_GstRegRef:
                        /* It is weird to have any of these loaded at this point. */
                        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_13));
                        continue;

                    case kIemNativeVarKind_End:
                    case kIemNativeVarKind_Invalid:
                        break;
                }
                AssertFailed();
            }
            else
            {
                /*
                 * Restore from temporary stack slot.
                 */
                uint8_t const idxStackSlot = pReNative->Core.aHstRegs[idxHstReg].idxStackSlot;
                AssertContinue(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS && (pReNative->Core.bmStack & RT_BIT_32(idxStackSlot)));
                pReNative->Core.bmStack &= ~RT_BIT_32(idxStackSlot);
                pReNative->Core.aHstRegs[idxHstReg].idxStackSlot = UINT8_MAX;

                off = iemNativeEmitLoadGprByBp(pReNative, off, idxHstReg, iemNativeStackCalcBpDisp(idxStackSlot));
            }
        } while (fHstRegs);
    }
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    fHstRegs = pReNative->Core.bmHstSimdRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK;
    if (fHstRegs)
    {
        do
        {
            unsigned int const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
            fHstRegs &= ~RT_BIT_32(idxHstReg);

            if (   pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_FixedTmp
                || pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_FixedReserved)
                continue;
            Assert(pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_Var);

            uint8_t const idxVar = pReNative->Core.aHstSimdRegs[idxHstReg].idxVar;
            IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
            AssertStmt(   IEMNATIVE_VAR_IDX_UNPACK(idxVar) < RT_ELEMENTS(pReNative->Core.aVars)
                       && (pReNative->Core.bmVars & RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)))
                       && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg
                       && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg
                       && (   pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar == sizeof(RTUINT128U)
                           || pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar == sizeof(RTUINT256U)),
                       IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_12));
            switch (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].enmKind)
            {
                case kIemNativeVarKind_Stack:
                {
                    /* Unspill the variable register. */
                    uint8_t const cbVar        = pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar;
                    uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
                    Log12(("iemNativeVarRestoreVolatileRegsPostHlpCall: unspilling idxVar=%#x/idxReg=%d (slot %#x bp+%d, off=%#x)\n",
                           idxVar, idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));

                    if (cbVar == sizeof(RTUINT128U))
                        off = iemNativeEmitLoadVecRegByBpU128(pReNative, off, idxHstReg, iemNativeStackCalcBpDisp(idxStackSlot));
                    else
                        off = iemNativeEmitLoadVecRegByBpU256(pReNative, off, idxHstReg, iemNativeStackCalcBpDisp(idxStackSlot));
                    continue;
                }

                case kIemNativeVarKind_Immediate:
                case kIemNativeVarKind_VarRef:
                case kIemNativeVarKind_GstRegRef:
                    /* It is weird to have any of these loaded at this point. */
                    AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_13));
                    continue;

                case kIemNativeVarKind_End:
                case kIemNativeVarKind_Invalid:
                    break;
            }
            AssertFailed();
        } while (fHstRegs);
    }
#endif
    return off;
}


/**
 * Worker that frees the stack slots for variable @a idxVar if any allocated.
 *
 * This is used both by iemNativeVarFreeOneWorker and iemNativeEmitCallCommon.
 *
 * ASSUMES that @a idxVar is valid and unpacked.
 */
DECL_FORCE_INLINE(void) iemNativeVarFreeStackSlots(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
{
    Assert(idxVar < RT_ELEMENTS(pReNative->Core.aVars)); /* unpacked! */
    uint8_t const idxStackSlot = pReNative->Core.aVars[idxVar].idxStackSlot;
    if (idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS)
    {
        uint8_t const  cbVar      = pReNative->Core.aVars[idxVar].cbVar;
        uint8_t const  cSlots     = (cbVar + sizeof(uint64_t) - 1) / sizeof(uint64_t);
        uint32_t const fAllocMask = (uint32_t)(RT_BIT_32(cSlots) - 1U);
        Assert(cSlots > 0);
        Assert(((pReNative->Core.bmStack >> idxStackSlot) & fAllocMask) == fAllocMask);
        Log11(("iemNativeVarFreeStackSlots: idxVar=%d/%#x iSlot=%#x/%#x (cbVar=%#x)\n",
               idxVar, IEMNATIVE_VAR_IDX_PACK(idxVar), idxStackSlot, fAllocMask, cbVar));
        pReNative->Core.bmStack &= ~(fAllocMask << idxStackSlot);
        pReNative->Core.aVars[idxVar].idxStackSlot = UINT8_MAX;
    }
    else
        Assert(idxStackSlot == UINT8_MAX);
}
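
/*
 * Worked example for the slot math above (hypothetical values): a variable
 * with cbVar = 16 occupying idxStackSlot = 4 yields cSlots = 2 and
 * fAllocMask = 0x3, so bits 4 and 5 are cleared from Core.bmStack and the
 * variable's idxStackSlot is reset to UINT8_MAX.
 */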


/**
 * Worker that frees a single variable.
 *
 * ASSUMES that @a idxVar is valid and unpacked.
 */
DECLHIDDEN(void) iemNativeVarFreeOneWorker(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
{
    Assert(   pReNative->Core.aVars[idxVar].enmKind >= kIemNativeVarKind_Invalid  /* Including invalid as we may have unused */
           && pReNative->Core.aVars[idxVar].enmKind <  kIemNativeVarKind_End);    /* variables in conditional branches. */
    Assert(!pReNative->Core.aVars[idxVar].fRegAcquired);

    /* Free the host register first if any assigned. */
    uint8_t const idxHstReg = pReNative->Core.aVars[idxVar].idxReg;
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    if (   idxHstReg != UINT8_MAX
        && pReNative->Core.aVars[idxVar].fSimdReg)
    {
        Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
        Assert(pReNative->Core.aHstSimdRegs[idxHstReg].idxVar == IEMNATIVE_VAR_IDX_PACK(idxVar));
        pReNative->Core.aHstSimdRegs[idxHstReg].idxVar = UINT8_MAX;
        pReNative->Core.bmHstSimdRegs &= ~RT_BIT_32(idxHstReg);
    }
    else
#endif
    if (idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
    {
        Assert(pReNative->Core.aHstRegs[idxHstReg].idxVar == IEMNATIVE_VAR_IDX_PACK(idxVar));
        pReNative->Core.aHstRegs[idxHstReg].idxVar = UINT8_MAX;
        pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
    }

    /* Free argument mapping. */
    uint8_t const uArgNo = pReNative->Core.aVars[idxVar].uArgNo;
    if (uArgNo < RT_ELEMENTS(pReNative->Core.aidxArgVars))
        pReNative->Core.aidxArgVars[uArgNo] = UINT8_MAX;

    /* Free the stack slots. */
    iemNativeVarFreeStackSlots(pReNative, idxVar);

    /* Free the actual variable. */
    pReNative->Core.aVars[idxVar].enmKind = kIemNativeVarKind_Invalid;
    pReNative->Core.bmVars &= ~RT_BIT_32(idxVar);
}


/**
 * Worker for iemNativeVarFreeAll that's called when there is anything to do.
 */
DECLHIDDEN(void) iemNativeVarFreeAllSlow(PIEMRECOMPILERSTATE pReNative, uint32_t bmVars)
{
    while (bmVars != 0)
    {
        uint8_t const idxVar = ASMBitFirstSetU32(bmVars) - 1;
        bmVars &= ~RT_BIT_32(idxVar);

#if 1 /** @todo optimize by simplifying this later... */
        iemNativeVarFreeOneWorker(pReNative, idxVar);
#else
        /* Only need to free the host register, the rest is done as bulk updates below. */
        uint8_t const idxHstReg = pReNative->Core.aVars[idxVar].idxReg;
        if (idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
        {
            Assert(pReNative->Core.aHstRegs[idxHstReg].idxVar == IEMNATIVE_VAR_IDX_PACK(idxVar));
            pReNative->Core.aHstRegs[idxHstReg].idxVar = UINT8_MAX;
            pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
        }
#endif
    }
#if 0 /** @todo optimize by simplifying this later... */
    pReNative->Core.bmVars     = 0;
    pReNative->Core.bmStack    = 0;
    pReNative->Core.u64ArgVars = UINT64_MAX;
#endif
}



/*********************************************************************************************************************************
*   Emitters for IEM_MC_CALL_CIMPL_XXX                                                                                           *
*********************************************************************************************************************************/

/**
 * Emits code to load a reference to the given guest register into @a idxGprDst.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitLeaGprByGstRegRef(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxGprDst,
                               IEMNATIVEGSTREGREF enmClass, uint8_t idxRegInClass)
{
#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    /** @todo If we are ever going to allow referencing the RIP register, we need to update the guest value here. */
#endif

    /*
     * Get the offset relative to the CPUMCTX structure.
     */
    uint32_t offCpumCtx;
    switch (enmClass)
    {
        case kIemNativeGstRegRef_Gpr:
            Assert(idxRegInClass < 16);
            offCpumCtx = RT_UOFFSETOF_DYN(CPUMCTX, aGRegs[idxRegInClass]);
            break;

        case kIemNativeGstRegRef_GprHighByte:    /**< AH, CH, DH, BH */
            Assert(idxRegInClass < 4);
            offCpumCtx = RT_UOFFSETOF_DYN(CPUMCTX, aGRegs[0].bHi) + idxRegInClass * sizeof(CPUMCTXGREG);
            break;

        case kIemNativeGstRegRef_EFlags:
            Assert(idxRegInClass == 0);
            offCpumCtx = RT_UOFFSETOF(CPUMCTX, eflags);
            break;

        case kIemNativeGstRegRef_MxCsr:
            Assert(idxRegInClass == 0);
            offCpumCtx = RT_UOFFSETOF(CPUMCTX, XState.x87.MXCSR);
            break;

        case kIemNativeGstRegRef_FpuReg:
            Assert(idxRegInClass < 8);
            AssertFailed(); /** @todo what kind of indexing? */
            offCpumCtx = RT_UOFFSETOF_DYN(CPUMCTX, XState.x87.aRegs[idxRegInClass]);
            break;

        case kIemNativeGstRegRef_MReg:
            Assert(idxRegInClass < 8);
            AssertFailed(); /** @todo what kind of indexing? */
            offCpumCtx = RT_UOFFSETOF_DYN(CPUMCTX, XState.x87.aRegs[idxRegInClass]);
            break;

        case kIemNativeGstRegRef_XReg:
            Assert(idxRegInClass < 16);
            offCpumCtx = RT_UOFFSETOF_DYN(CPUMCTX, XState.x87.aXMM[idxRegInClass]);
            break;

        case kIemNativeGstRegRef_X87:    /* Not a register actually, but we would just duplicate code otherwise. */
            Assert(idxRegInClass == 0);
            offCpumCtx = RT_UOFFSETOF(CPUMCTX, XState.x87);
            break;

        case kIemNativeGstRegRef_XState: /* Not a register actually, but we would just duplicate code otherwise. */
            Assert(idxRegInClass == 0);
            offCpumCtx = RT_UOFFSETOF(CPUMCTX, XState);
            break;

        default:
            AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_5));
    }

    /*
     * Load the value into the destination register.
     */
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLeaGprByVCpu(pReNative, off, idxGprDst, offCpumCtx + RT_UOFFSETOF(VMCPUCC, cpum.GstCtx));

#elif defined(RT_ARCH_ARM64)
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
    Assert(offCpumCtx < 4096);
    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, idxGprDst, IEMNATIVE_REG_FIXED_PCPUMCTX, offCpumCtx);

#else
# error "Port me!"
#endif

    return off;
}
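
/*
 * Example (values for illustration only): for enmClass = kIemNativeGstRegRef_Gpr
 * and idxRegInClass = 3, the emitted instruction leaves the host address of
 * pVCpu->cpum.GstCtx.aGRegs[3] (i.e. the guest RBX value) in idxGprDst.
 */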


/**
 * Common code for CIMPL and AIMPL calls.
 *
 * These are calls that use argument variables and such.  They should not be
 * confused with internal calls required to implement an MC operation,
 * like a TLB load or similar.
 *
 * Upon return all that is left to do is to load any hidden arguments and
 * perform the call.  All argument variables are freed.
 *
 * @returns New code buffer offset; throws VBox status code on error.
 * @param   pReNative           The native recompile state.
 * @param   off                 The code buffer offset.
 * @param   cArgs               The total number of arguments (includes hidden
 *                              count).
 * @param   cHiddenArgs         The number of hidden arguments.  The hidden
 *                              arguments must not have any variable declared for
 *                              them, whereas all the regular arguments must
 *                              (tstIEMCheckMc ensures this).
 * @param   fFlushPendingWrites Whether to flush pending writes (default: true).
 *                              Pending writes in call-volatile registers are
 *                              still flushed when this is false.
 */
DECL_HIDDEN_THROW(uint32_t)
iemNativeEmitCallCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs, uint8_t cHiddenArgs,
                        bool fFlushPendingWrites /*= true*/)
{
#ifdef VBOX_STRICT
    /*
     * Assert sanity.
     */
    Assert(cArgs <= IEMNATIVE_CALL_MAX_ARG_COUNT);
    Assert(cHiddenArgs < IEMNATIVE_CALL_ARG_GREG_COUNT);
    for (unsigned i = 0; i < cHiddenArgs; i++)
        Assert(pReNative->Core.aidxArgVars[i] == UINT8_MAX);
    for (unsigned i = cHiddenArgs; i < cArgs; i++)
    {
        Assert(pReNative->Core.aidxArgVars[i] != UINT8_MAX); /* checked by tstIEMCheckMc.cpp */
        Assert(pReNative->Core.bmVars & RT_BIT_32(pReNative->Core.aidxArgVars[i]));
    }
    iemNativeRegAssertSanity(pReNative);
#endif

    /* We don't know what the called function makes use of, so flush any pending register writes. */
    RT_NOREF(fFlushPendingWrites);
#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    if (fFlushPendingWrites)
#endif
        off = iemNativeRegFlushPendingWrites(pReNative, off);

    /*
     * Before we do anything else, go over variables that are referenced and
     * make sure they are not in a register.
     */
    uint32_t bmVars = pReNative->Core.bmVars;
    if (bmVars)
    {
        do
        {
            uint8_t const idxVar = ASMBitFirstSetU32(bmVars) - 1;
            bmVars &= ~RT_BIT_32(idxVar);

            if (pReNative->Core.aVars[idxVar].idxReferrerVar != UINT8_MAX)
            {
                uint8_t const idxRegOld = pReNative->Core.aVars[idxVar].idxReg;
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
                if (   idxRegOld != UINT8_MAX
                    && pReNative->Core.aVars[idxVar].fSimdReg)
                {
                    Assert(idxRegOld < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
                    Assert(pReNative->Core.aVars[idxVar].cbVar == sizeof(RTUINT128U) || pReNative->Core.aVars[idxVar].cbVar == sizeof(RTUINT256U));

                    uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, IEMNATIVE_VAR_IDX_PACK(idxVar));
                    Log12(("iemNativeEmitCallCommon: spilling idxVar=%d/%#x/idxReg=%d (referred to by %d) onto the stack (slot %#x bp+%d, off=%#x)\n",
                           idxVar, IEMNATIVE_VAR_IDX_PACK(idxVar), idxRegOld, pReNative->Core.aVars[idxVar].idxReferrerVar,
                           idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
                    if (pReNative->Core.aVars[idxVar].cbVar == sizeof(RTUINT128U))
                        off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
                    else
                        off = iemNativeEmitStoreVecRegByBpU256(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);

                    Assert(!(  (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
                             & pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows));

                    pReNative->Core.aVars[idxVar].idxReg        = UINT8_MAX;
                    pReNative->Core.bmHstSimdRegs              &= ~RT_BIT_32(idxRegOld);
                    pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
                    pReNative->Core.bmGstSimdRegShadows        &= ~pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows;
                    pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows = 0;
                }
                else
#endif
                if (idxRegOld < RT_ELEMENTS(pReNative->Core.aHstRegs))
                {
                    uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, IEMNATIVE_VAR_IDX_PACK(idxVar));
                    Log12(("iemNativeEmitCallCommon: spilling idxVar=%d/%#x/idxReg=%d (referred to by %d) onto the stack (slot %#x bp+%d, off=%#x)\n",
                           idxVar, IEMNATIVE_VAR_IDX_PACK(idxVar), idxRegOld, pReNative->Core.aVars[idxVar].idxReferrerVar,
                           idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
                    off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);

                    pReNative->Core.aVars[idxVar].idxReg    = UINT8_MAX;
                    pReNative->Core.bmHstRegs              &= ~RT_BIT_32(idxRegOld);
                    pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
                    pReNative->Core.bmGstRegShadows        &= ~pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
                    pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows = 0;
                }
            }
        } while (bmVars != 0);
#if 0 //def VBOX_STRICT
        iemNativeRegAssertSanity(pReNative);
#endif
    }

    uint8_t const cRegArgs = RT_MIN(cArgs, RT_ELEMENTS(g_aidxIemNativeCallRegs));

#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    /*
     * As the very first step, go over the host registers that will be used for
     * arguments and make sure they don't shadow anything which needs writing
     * back first.
     */
    for (uint32_t i = 0; i < cRegArgs; i++)
    {
        uint8_t const idxArgReg = g_aidxIemNativeCallRegs[i];

        /* Writeback any dirty guest shadows before using this register. */
        if (pReNative->Core.bmGstRegShadowDirty & pReNative->Core.aHstRegs[idxArgReg].fGstRegShadows)
            off = iemNativeRegFlushDirtyGuestByHostRegShadow(pReNative, off, idxArgReg);
        Assert(!(pReNative->Core.bmGstRegShadowDirty & pReNative->Core.aHstRegs[idxArgReg].fGstRegShadows));
    }
#endif

    /*
     * First, go over the host registers that will be used for arguments and make
     * sure they either hold the desired argument or are free.
     */
    if (pReNative->Core.bmHstRegs & g_afIemNativeCallRegs[cRegArgs])
    {
        for (uint32_t i = 0; i < cRegArgs; i++)
        {
            uint8_t const idxArgReg = g_aidxIemNativeCallRegs[i];
            if (pReNative->Core.bmHstRegs & RT_BIT_32(idxArgReg))
            {
                if (pReNative->Core.aHstRegs[idxArgReg].enmWhat == kIemNativeWhat_Var)
                {
                    uint8_t const idxVar = pReNative->Core.aHstRegs[idxArgReg].idxVar;
                    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
                    PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
                    Assert(pVar->idxReg == idxArgReg);
                    uint8_t const uArgNo = pVar->uArgNo;
                    if (uArgNo == i)
                    { /* perfect */ }
                    /* The variable allocator logic should make sure this is impossible,
                       except for when the return register is used as a parameter (ARM,
                       but not x86). */
#if RT_BIT_32(IEMNATIVE_CALL_RET_GREG) & IEMNATIVE_CALL_ARGS_GREG_MASK
                    else if (idxArgReg == IEMNATIVE_CALL_RET_GREG && uArgNo != UINT8_MAX)
                    {
# ifdef IEMNATIVE_FP_OFF_STACK_ARG0
#  error "Implement this"
# endif
                        Assert(uArgNo < IEMNATIVE_CALL_ARG_GREG_COUNT);
                        uint8_t const idxFinalArgReg = g_aidxIemNativeCallRegs[uArgNo];
                        AssertStmt(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxFinalArgReg)),
                                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_10));
                        off = iemNativeRegMoveVar(pReNative, off, idxVar, idxArgReg, idxFinalArgReg, "iemNativeEmitCallCommon");
                    }
#endif
                    else
                    {
                        AssertStmt(uArgNo == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_10));

                        if (pVar->enmKind == kIemNativeVarKind_Stack)
                            off = iemNativeRegMoveOrSpillStackVar(pReNative, off, idxVar);
                        else
                        {
                            /* just free it, can be reloaded if used again */
                            pVar->idxReg               = UINT8_MAX;
                            pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxArgReg);
                            iemNativeRegClearGstRegShadowing(pReNative, idxArgReg, off);
                        }
                    }
                }
                else
                    AssertStmt(pReNative->Core.aHstRegs[idxArgReg].enmWhat == kIemNativeWhat_Arg,
                               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_8));
            }
        }
#if 0 //def VBOX_STRICT
        iemNativeRegAssertSanity(pReNative);
#endif
    }

    Assert(!(pReNative->Core.bmHstRegs & g_afIemNativeCallRegs[cHiddenArgs])); /* No variables for hidden arguments. */

#ifdef IEMNATIVE_FP_OFF_STACK_ARG0
    /*
     * If there are any stack arguments, make sure they are in their place as well.
     *
     * We can use IEMNATIVE_CALL_ARG0_GREG as a temporary register, since we (or
     * the caller) will be loading it later and it must be free (see first loop).
     */
    if (cArgs > IEMNATIVE_CALL_ARG_GREG_COUNT)
    {
        for (unsigned i = IEMNATIVE_CALL_ARG_GREG_COUNT; i < cArgs; i++)
        {
            PIEMNATIVEVAR const pVar      = &pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]]; /* unpacked */
            int32_t const       offBpDisp = g_aoffIemNativeCallStackArgBpDisp[i - IEMNATIVE_CALL_ARG_GREG_COUNT];
            if (pVar->idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
            {
                Assert(pVar->enmKind == kIemNativeVarKind_Stack); /* Imm as well? */
                off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, pVar->idxReg);
                pReNative->Core.bmHstRegs &= ~RT_BIT_32(pVar->idxReg);
                pVar->idxReg = UINT8_MAX;
            }
            else
            {
                /* Use ARG0 as temp for stuff we need registers for. */
                switch (pVar->enmKind)
                {
                    case kIemNativeVarKind_Stack:
                    {
                        uint8_t const idxStackSlot = pVar->idxStackSlot;
                        AssertStmt(idxStackSlot != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
                        off = iemNativeEmitLoadGprByBp(pReNative, off, IEMNATIVE_CALL_ARG0_GREG /* is free */,
                                                       iemNativeStackCalcBpDisp(idxStackSlot));
                        off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, IEMNATIVE_CALL_ARG0_GREG);
                        continue;
                    }

                    case kIemNativeVarKind_Immediate:
                        off = iemNativeEmitStoreImm64ByBp(pReNative, off, offBpDisp, pVar->u.uValue);
                        continue;

                    case kIemNativeVarKind_VarRef:
                    {
                        uint8_t const idxOtherVar    = pVar->u.idxRefVar; /* unpacked */
                        Assert(idxOtherVar < RT_ELEMENTS(pReNative->Core.aVars));
                        uint8_t const idxStackSlot   = iemNativeVarGetStackSlot(pReNative, IEMNATIVE_VAR_IDX_PACK(idxOtherVar));
                        int32_t const offBpDispOther = iemNativeStackCalcBpDisp(idxStackSlot);
                        uint8_t const idxRegOther    = pReNative->Core.aVars[idxOtherVar].idxReg;
# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
                        bool const    fSimdReg       = pReNative->Core.aVars[idxOtherVar].fSimdReg;
                        uint8_t const cbVar          = pReNative->Core.aVars[idxOtherVar].cbVar;
                        if (   fSimdReg
                            && idxRegOther != UINT8_MAX)
                        {
                            Assert(idxRegOther < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
                            if (cbVar == sizeof(RTUINT128U))
                                off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, offBpDispOther, idxRegOther);
                            else
                                off = iemNativeEmitStoreVecRegByBpU256(pReNative, off, offBpDispOther, idxRegOther);
                            iemNativeSimdRegFreeVar(pReNative, idxRegOther, true); /** @todo const ref? */
                            Assert(pReNative->Core.aVars[idxOtherVar].idxReg == UINT8_MAX);
                        }
                        else
# endif
                        if (idxRegOther < RT_ELEMENTS(pReNative->Core.aHstRegs))
                        {
                            off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDispOther, idxRegOther);
                            iemNativeRegFreeVar(pReNative, idxRegOther, true); /** @todo const ref? */
                            Assert(pReNative->Core.aVars[idxOtherVar].idxReg == UINT8_MAX);
                        }
                        Assert(   pReNative->Core.aVars[idxOtherVar].idxStackSlot != UINT8_MAX
                               && pReNative->Core.aVars[idxOtherVar].idxReg       == UINT8_MAX);
                        off = iemNativeEmitLeaGprByBp(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, offBpDispOther);
                        off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, IEMNATIVE_CALL_ARG0_GREG);
                        continue;
                    }

                    case kIemNativeVarKind_GstRegRef:
                        off = iemNativeEmitLeaGprByGstRegRef(pReNative, off, IEMNATIVE_CALL_ARG0_GREG,
                                                             pVar->u.GstRegRef.enmClass, pVar->u.GstRegRef.idx);
                        off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, IEMNATIVE_CALL_ARG0_GREG);
                        continue;

                    case kIemNativeVarKind_Invalid:
                    case kIemNativeVarKind_End:
                        break;
                }
                AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_3));
            }
        }
# if 0 //def VBOX_STRICT
        iemNativeRegAssertSanity(pReNative);
# endif
    }
#else
    AssertCompile(IEMNATIVE_CALL_MAX_ARG_COUNT <= IEMNATIVE_CALL_ARG_GREG_COUNT);
#endif

    /*
     * Make sure the argument variables are loaded into their respective registers.
     *
     * We can optimize this by ASSUMING that any register allocations are for
     * registers that have already been loaded and are ready.  The previous step
     * saw to that.
     */
    if (~pReNative->Core.bmHstRegs & (g_afIemNativeCallRegs[cRegArgs] & ~g_afIemNativeCallRegs[cHiddenArgs]))
    {
        for (unsigned i = cHiddenArgs; i < cRegArgs; i++)
        {
            uint8_t const idxArgReg = g_aidxIemNativeCallRegs[i];
            if (pReNative->Core.bmHstRegs & RT_BIT_32(idxArgReg))
                Assert(   pReNative->Core.aHstRegs[idxArgReg].idxVar == IEMNATIVE_VAR_IDX_PACK(pReNative->Core.aidxArgVars[i])
                       && pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]].uArgNo == i
                       && pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]].idxReg == idxArgReg);
            else
            {
                PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]]; /* unpacked */
                if (pVar->idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
                {
                    Assert(pVar->enmKind == kIemNativeVarKind_Stack);
                    off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxArgReg, pVar->idxReg);
                    pReNative->Core.bmHstRegs = (pReNative->Core.bmHstRegs & ~RT_BIT_32(pVar->idxReg))
                                              | RT_BIT_32(idxArgReg);
                    pVar->idxReg = idxArgReg;
                }
                else
                {
                    /* Use ARG0 as temp for stuff we need registers for. */
                    switch (pVar->enmKind)
                    {
                        case kIemNativeVarKind_Stack:
                        {
                            uint8_t const idxStackSlot = pVar->idxStackSlot;
                            AssertStmt(idxStackSlot != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
                            off = iemNativeEmitLoadGprByBp(pReNative, off, idxArgReg, iemNativeStackCalcBpDisp(idxStackSlot));
                            continue;
                        }

                        case kIemNativeVarKind_Immediate:
                            off = iemNativeEmitLoadGprImm64(pReNative, off, idxArgReg, pVar->u.uValue);
                            continue;

                        case kIemNativeVarKind_VarRef:
                        {
                            uint8_t const idxOtherVar    = pVar->u.idxRefVar; /* unpacked */
                            Assert(idxOtherVar < RT_ELEMENTS(pReNative->Core.aVars));
                            uint8_t const idxStackSlot   = iemNativeVarGetStackSlot(pReNative,
                                                                                    IEMNATIVE_VAR_IDX_PACK(idxOtherVar));
                            int32_t const offBpDispOther = iemNativeStackCalcBpDisp(idxStackSlot);
                            uint8_t const idxRegOther    = pReNative->Core.aVars[idxOtherVar].idxReg;
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
                            bool const    fSimdReg       = pReNative->Core.aVars[idxOtherVar].fSimdReg;
                            uint8_t const cbVar          = pReNative->Core.aVars[idxOtherVar].cbVar;
                            if (   fSimdReg
                                && idxRegOther != UINT8_MAX)
                            {
                                Assert(idxRegOther < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
                                if (cbVar == sizeof(RTUINT128U))
                                    off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, offBpDispOther, idxRegOther);
                                else
                                    off = iemNativeEmitStoreVecRegByBpU256(pReNative, off, offBpDispOther, idxRegOther);
                                iemNativeSimdRegFreeVar(pReNative, idxRegOther, true); /** @todo const ref? */
                                Assert(pReNative->Core.aVars[idxOtherVar].idxReg == UINT8_MAX);
                            }
                            else
#endif
                            if (idxRegOther < RT_ELEMENTS(pReNative->Core.aHstRegs))
                            {
                                off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDispOther, idxRegOther);
                                iemNativeRegFreeVar(pReNative, idxRegOther, true); /** @todo const ref? */
                                Assert(pReNative->Core.aVars[idxOtherVar].idxReg == UINT8_MAX);
                            }
                            Assert(   pReNative->Core.aVars[idxOtherVar].idxStackSlot != UINT8_MAX
                                   && pReNative->Core.aVars[idxOtherVar].idxReg       == UINT8_MAX);
                            off = iemNativeEmitLeaGprByBp(pReNative, off, idxArgReg, offBpDispOther);
                            continue;
                        }

                        case kIemNativeVarKind_GstRegRef:
                            off = iemNativeEmitLeaGprByGstRegRef(pReNative, off, idxArgReg,
                                                                 pVar->u.GstRegRef.enmClass, pVar->u.GstRegRef.idx);
                            continue;

                        case kIemNativeVarKind_Invalid:
                        case kIemNativeVarKind_End:
                            break;
                    }
                    AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_3));
                }
            }
        }
#if 0 //def VBOX_STRICT
        iemNativeRegAssertSanity(pReNative);
#endif
    }
#ifdef VBOX_STRICT
    else
        for (unsigned i = cHiddenArgs; i < cRegArgs; i++)
        {
            Assert(pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]].uArgNo == i);
            Assert(pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]].idxReg == g_aidxIemNativeCallRegs[i]);
        }
#endif

    /*
     * Free all argument variables (simplified).
     * Their lifetime always expires with the call they are for.
     */
    /** @todo Make the python script check that arguments aren't used after
     *        IEM_MC_CALL_XXXX. */
    /** @todo There is a special case with IEM_MC_MEM_MAP_U16_RW and friends
     *        requiring an IEM_MC_MEM_COMMIT_AND_UNMAP_RW after an AIMPL call,
     *        typically with an argument value.  There is also some FPU stuff. */
    for (uint32_t i = cHiddenArgs; i < cArgs; i++)
    {
        uint8_t const idxVar = pReNative->Core.aidxArgVars[i]; /* unpacked */
        Assert(idxVar < RT_ELEMENTS(pReNative->Core.aVars));

        /* no need to free registers: */
        AssertMsg(i < IEMNATIVE_CALL_ARG_GREG_COUNT
                  ?    pReNative->Core.aVars[idxVar].idxReg == g_aidxIemNativeCallRegs[i]
                    || pReNative->Core.aVars[idxVar].idxReg == UINT8_MAX
                  : pReNative->Core.aVars[idxVar].idxReg == UINT8_MAX,
                  ("i=%d idxVar=%d idxReg=%d, expected %d\n", i, idxVar, pReNative->Core.aVars[idxVar].idxReg,
                   i < IEMNATIVE_CALL_ARG_GREG_COUNT ? g_aidxIemNativeCallRegs[i] : UINT8_MAX));

        pReNative->Core.aidxArgVars[i] = UINT8_MAX;
        pReNative->Core.bmVars        &= ~RT_BIT_32(idxVar);
        iemNativeVarFreeStackSlots(pReNative, idxVar);
    }
    Assert(pReNative->Core.u64ArgVars == UINT64_MAX);

    /*
     * Flush volatile registers as we make the call.
     */
    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, cRegArgs);

    return off;
}
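
/*
 * Sketch of how a caller typically uses the function above (simplified;
 * pfnCImpl and the argument counts are placeholders, not taken from this
 * file): after iemNativeEmitCallCommon() has put the visible arguments in
 * place, the hidden ones are loaded and the call itself is emitted:
 *
 *      off = iemNativeEmitCallCommon(pReNative, off, cArgs + cHiddenArgs, cHiddenArgs);
 *      off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
 *      off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)pfnCImpl);
 */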



/*********************************************************************************************************************************
*   TLB Lookup.                                                                                                                  *
*********************************************************************************************************************************/

/**
 * This is called via iemNativeHlpAsmSafeWrapCheckTlbLookup.
 */
DECLASM(void) iemNativeHlpCheckTlbLookup(PVMCPU pVCpu, uintptr_t uResult, uint64_t GCPtr, uint64_t uSegAndSizeAndAccessAndDisp)
{
    uint8_t const  iSegReg = RT_BYTE1(uSegAndSizeAndAccessAndDisp);
    uint8_t const  cbMem   = RT_BYTE2(uSegAndSizeAndAccessAndDisp);
    uint32_t const fAccess = (uint32_t)uSegAndSizeAndAccessAndDisp >> 16;
    uint8_t const  offDisp = RT_BYTE5(uSegAndSizeAndAccessAndDisp);
    Log(("iemNativeHlpCheckTlbLookup: %x:%#RX64+%#x LB %#x fAccess=%#x -> %#RX64\n", iSegReg, GCPtr, offDisp, cbMem, fAccess, uResult));
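    /* Note on the packing (as decoded above): bits 0..7 hold the segment
       register index (UINT8_MAX for a flat address), bits 8..15 the access
       size, bits 16..31 the IEM_ACCESS_XXX mask, and bits 32..39 the
       displacement that is added to GCPtr below. */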

    /* Do the lookup manually. */
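    /* The tag is first checked against the even TLB entry using the regular
       revision; on a mismatch the odd entry (pTlbe + 1) is checked against the
       global revision, that being the half used for global pages. */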
    RTGCPTR const  GCPtrFlat = (iSegReg == UINT8_MAX ? GCPtr : GCPtr + pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base) + offDisp;
    uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFlat);
    PCIEMTLBENTRY  pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
    if (RT_LIKELY(   pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
                  || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
    {
        /*
         * Check TLB page table level access flags.
         */
        AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
        uint64_t const fNoUser          = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
        uint64_t const fNoWriteNoDirty  = !(fAccess & IEM_ACCESS_TYPE_WRITE) ? 0
                                        : IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PG_NO_WRITE;
        uint64_t const fFlagsAndPhysRev = pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
                                                                     | IEMTLBE_F_PG_UNASSIGNED
                                                                     | IEMTLBE_F_PT_NO_ACCESSED
                                                                     | fNoWriteNoDirty | fNoUser);
        uint64_t const uTlbPhysRev      = pVCpu->iem.s.DataTlb.uTlbPhysRev;
        if (RT_LIKELY(fFlagsAndPhysRev == uTlbPhysRev))
        {
            /*
             * Return the address.
             */
            uint8_t const * const pbAddr = &pTlbe->pbMappingR3[GCPtrFlat & GUEST_PAGE_OFFSET_MASK];
            if ((uintptr_t)pbAddr == uResult)
                return;
            RT_NOREF(cbMem);
            AssertFailed();
        }
        else
            AssertMsgFailed(("fFlagsAndPhysRev=%#RX64 vs uTlbPhysRev=%#RX64: %#RX64\n",
                             fFlagsAndPhysRev, uTlbPhysRev, fFlagsAndPhysRev ^ uTlbPhysRev));
    }
    else
        AssertFailed();
    RT_BREAKPOINT();
}

/* The rest of the code is in IEMN8veRecompilerTlbLookup.h. */



/*********************************************************************************************************************************
*   Recompiler Core.                                                                                                             *
*********************************************************************************************************************************/

/** @callback_method_impl{FNDISREADBYTES, Dummy.} */
static DECLCALLBACK(int) iemNativeDisasReadBytesDummy(PDISSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
{
    RT_BZERO(&pDis->Instr.ab[offInstr], cbMaxRead);
    pDis->cbCachedInstr += cbMaxRead;
    RT_NOREF(cbMinRead);
    return VERR_NO_DATA;
}


DECLHIDDEN(const char *) iemNativeDbgVCpuOffsetToName(uint32_t off)
{
    static struct { uint32_t off; const char *pszName; } const s_aMembers[] =
    {
#define ENTRY(a_Member) { (uint32_t)RT_UOFFSETOF(VMCPUCC, a_Member), #a_Member } /* cast is for stupid MSC */
        ENTRY(fLocalForcedActions),
        ENTRY(iem.s.rcPassUp),
        ENTRY(iem.s.fExec),
        ENTRY(iem.s.pbInstrBuf),
        ENTRY(iem.s.uInstrBufPc),
        ENTRY(iem.s.GCPhysInstrBuf),
        ENTRY(iem.s.cbInstrBufTotal),
        ENTRY(iem.s.idxTbCurInstr),
        ENTRY(iem.s.fSkippingEFlags),
#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
        ENTRY(iem.s.uPcUpdatingDebug),
#endif
#ifdef VBOX_WITH_STATISTICS
        ENTRY(iem.s.StatNativeTlbHitsForFetch),
        ENTRY(iem.s.StatNativeTlbHitsForStore),
        ENTRY(iem.s.StatNativeTlbHitsForStack),
        ENTRY(iem.s.StatNativeTlbHitsForMapped),
        ENTRY(iem.s.StatNativeCodeTlbMissesNewPage),
        ENTRY(iem.s.StatNativeCodeTlbHitsForNewPage),
        ENTRY(iem.s.StatNativeCodeTlbMissesNewPageWithOffset),
        ENTRY(iem.s.StatNativeCodeTlbHitsForNewPageWithOffset),
#endif
        ENTRY(iem.s.DataTlb.uTlbRevision),
        ENTRY(iem.s.DataTlb.uTlbPhysRev),
        ENTRY(iem.s.DataTlb.cTlbCoreHits),
        ENTRY(iem.s.DataTlb.cTlbInlineCodeHits),
        ENTRY(iem.s.DataTlb.cTlbNativeMissTag),
        ENTRY(iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev),
        ENTRY(iem.s.DataTlb.cTlbNativeMissAlignment),
        ENTRY(iem.s.DataTlb.cTlbNativeMissCrossPage),
        ENTRY(iem.s.DataTlb.cTlbNativeMissNonCanonical),
        ENTRY(iem.s.DataTlb.aEntries),
        ENTRY(iem.s.CodeTlb.uTlbRevision),
        ENTRY(iem.s.CodeTlb.uTlbPhysRev),
        ENTRY(iem.s.CodeTlb.cTlbCoreHits),
        ENTRY(iem.s.CodeTlb.cTlbNativeMissTag),
        ENTRY(iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev),
        ENTRY(iem.s.CodeTlb.cTlbNativeMissAlignment),
        ENTRY(iem.s.CodeTlb.cTlbNativeMissCrossPage),
        ENTRY(iem.s.CodeTlb.cTlbNativeMissNonCanonical),
        ENTRY(iem.s.CodeTlb.aEntries),
        ENTRY(pVMR3),
        ENTRY(cpum.GstCtx.rax),
        ENTRY(cpum.GstCtx.ah),
        ENTRY(cpum.GstCtx.rcx),
        ENTRY(cpum.GstCtx.ch),
        ENTRY(cpum.GstCtx.rdx),
        ENTRY(cpum.GstCtx.dh),
        ENTRY(cpum.GstCtx.rbx),
        ENTRY(cpum.GstCtx.bh),
        ENTRY(cpum.GstCtx.rsp),
        ENTRY(cpum.GstCtx.rbp),
        ENTRY(cpum.GstCtx.rsi),
        ENTRY(cpum.GstCtx.rdi),
        ENTRY(cpum.GstCtx.r8),
        ENTRY(cpum.GstCtx.r9),
        ENTRY(cpum.GstCtx.r10),
        ENTRY(cpum.GstCtx.r11),
        ENTRY(cpum.GstCtx.r12),
        ENTRY(cpum.GstCtx.r13),
        ENTRY(cpum.GstCtx.r14),
        ENTRY(cpum.GstCtx.r15),
        ENTRY(cpum.GstCtx.es.Sel),
        ENTRY(cpum.GstCtx.es.u64Base),
        ENTRY(cpum.GstCtx.es.u32Limit),
        ENTRY(cpum.GstCtx.es.Attr),
        ENTRY(cpum.GstCtx.cs.Sel),
        ENTRY(cpum.GstCtx.cs.u64Base),
        ENTRY(cpum.GstCtx.cs.u32Limit),
        ENTRY(cpum.GstCtx.cs.Attr),
        ENTRY(cpum.GstCtx.ss.Sel),
        ENTRY(cpum.GstCtx.ss.u64Base),
        ENTRY(cpum.GstCtx.ss.u32Limit),
        ENTRY(cpum.GstCtx.ss.Attr),
        ENTRY(cpum.GstCtx.ds.Sel),
        ENTRY(cpum.GstCtx.ds.u64Base),
        ENTRY(cpum.GstCtx.ds.u32Limit),
        ENTRY(cpum.GstCtx.ds.Attr),
        ENTRY(cpum.GstCtx.fs.Sel),
        ENTRY(cpum.GstCtx.fs.u64Base),
        ENTRY(cpum.GstCtx.fs.u32Limit),
        ENTRY(cpum.GstCtx.fs.Attr),
        ENTRY(cpum.GstCtx.gs.Sel),
        ENTRY(cpum.GstCtx.gs.u64Base),
        ENTRY(cpum.GstCtx.gs.u32Limit),
        ENTRY(cpum.GstCtx.gs.Attr),
        ENTRY(cpum.GstCtx.rip),
        ENTRY(cpum.GstCtx.eflags),
        ENTRY(cpum.GstCtx.uRipInhibitInt),
        ENTRY(cpum.GstCtx.cr0),
        ENTRY(cpum.GstCtx.cr4),
        ENTRY(cpum.GstCtx.aXcr[0]),
        ENTRY(cpum.GstCtx.aXcr[1]),
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
        ENTRY(cpum.GstCtx.XState.x87.MXCSR),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[0]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[1]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[2]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[3]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[4]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[5]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[6]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[7]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[8]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[9]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[10]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[11]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[12]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[13]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[14]),
        ENTRY(cpum.GstCtx.XState.x87.aXMM[15]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[0]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[1]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[2]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[3]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[4]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[5]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[6]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[7]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[8]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[9]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[10]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[11]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[12]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[13]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[14]),
        ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[15])
#endif
#undef ENTRY
    };
#ifdef VBOX_STRICT
    static bool s_fOrderChecked = false;
    if (!s_fOrderChecked)
    {
        s_fOrderChecked = true;
        uint32_t offPrev = s_aMembers[0].off;
        for (unsigned i = 1; i < RT_ELEMENTS(s_aMembers); i++)
        {
            Assert(s_aMembers[i].off > offPrev);
            offPrev = s_aMembers[i].off;
        }
    }
#endif

    /*
     * Binary lookup.
     */
    unsigned iStart = 0;
    unsigned iEnd   = RT_ELEMENTS(s_aMembers);
    for (;;)
    {
        unsigned const iCur   = iStart + (iEnd - iStart) / 2;
        uint32_t const offCur = s_aMembers[iCur].off;
        if (off < offCur)
        {
            if (iCur != iStart)
                iEnd = iCur;
            else
                break;
        }
        else if (off > offCur)
        {
            if (iCur + 1 < iEnd)
                iStart = iCur + 1;
            else
                break;
        }
        else
            return s_aMembers[iCur].pszName;
    }
#ifdef VBOX_WITH_STATISTICS
    if (off - RT_UOFFSETOF(VMCPUCC, iem.s.acThreadedFuncStats) < RT_SIZEOFMEMB(VMCPUCC, iem.s.acThreadedFuncStats))
        return "iem.s.acThreadedFuncStats[iFn]";
#endif
    return NULL;
}
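
/*
 * Example use of the resolver above: passing
 * RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.rip) returns "cpum.GstCtx.rip", while an
 * offset into the threaded function statistics area resolves to
 * "iem.s.acThreadedFuncStats[iFn]" (statistics builds only).
 */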


/**
 * Translates a label to a name.
 */
static const char *iemNativeGetLabelName(IEMNATIVELABELTYPE enmLabel, bool fCommonCode /*= false*/)
{
    switch (enmLabel)
    {
#define STR_CASE_CMN(a_Label) case kIemNativeLabelType_ ## a_Label: return fCommonCode ? "Chunk_" #a_Label : #a_Label;
        STR_CASE_CMN(Invalid);
        STR_CASE_CMN(RaiseDe);
        STR_CASE_CMN(RaiseUd);
        STR_CASE_CMN(RaiseSseRelated);
        STR_CASE_CMN(RaiseAvxRelated);
        STR_CASE_CMN(RaiseSseAvxFpRelated);
        STR_CASE_CMN(RaiseNm);
        STR_CASE_CMN(RaiseGp0);
        STR_CASE_CMN(RaiseMf);
        STR_CASE_CMN(RaiseXf);
        STR_CASE_CMN(ObsoleteTb);
        STR_CASE_CMN(NeedCsLimChecking);
        STR_CASE_CMN(CheckBranchMiss);
        STR_CASE_CMN(Return);
        STR_CASE_CMN(ReturnBreak);
        STR_CASE_CMN(ReturnBreakFF);
        STR_CASE_CMN(ReturnWithFlags);
        STR_CASE_CMN(ReturnBreakViaLookup);
        STR_CASE_CMN(ReturnBreakViaLookupWithIrq);
        STR_CASE_CMN(ReturnBreakViaLookupWithTlb);
        STR_CASE_CMN(ReturnBreakViaLookupWithTlbAndIrq);
        STR_CASE_CMN(NonZeroRetOrPassUp);
#undef STR_CASE_CMN
#define STR_CASE_LBL(a_Label) case kIemNativeLabelType_ ## a_Label: return #a_Label;
        STR_CASE_LBL(LoopJumpTarget);
        STR_CASE_LBL(If);
        STR_CASE_LBL(Else);
        STR_CASE_LBL(Endif);
        STR_CASE_LBL(CheckIrq);
        STR_CASE_LBL(TlbLookup);
        STR_CASE_LBL(TlbMiss);
        STR_CASE_LBL(TlbDone);
        case kIemNativeLabelType_End: break;
    }
    return NULL;
}
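
/*
 * Note: STR_CASE_CMN above is a small token/string-pasting helper; each
 * invocation expands to one complete case statement.  For instance,
 * STR_CASE_CMN(RaiseDe) expands (after string literal concatenation) to:
 *
 * @code
 *  case kIemNativeLabelType_RaiseDe: return fCommonCode ? "Chunk_RaiseDe" : "RaiseDe";
 * @endcode
 */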


/** Info for the symbols resolver used when disassembling. */
typedef struct IEMNATIVDISASMSYMCTX
{
    PVMCPU                  pVCpu;
    PCIEMTB                 pTb;
# ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
    PCIEMNATIVEPERCHUNKCTX  pCtx;
# endif
# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    PCIEMTBDBG              pDbgInfo;
# endif
} IEMNATIVDISASMSYMCTX;
typedef IEMNATIVDISASMSYMCTX *PIEMNATIVDISASMSYMCTX;


/**
 * Resolves an address to a symbol, if we can.
 */
static const char *iemNativeDisasmGetSymbol(PIEMNATIVDISASMSYMCTX pSymCtx, uintptr_t uAddress, char *pszBuf, size_t cbBuf)
{
#if defined(IEMNATIVE_WITH_TB_DEBUG_INFO) || defined(IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE)
    PCIEMTB const   pTb       = pSymCtx->pTb;
    uintptr_t const offNative = (uAddress - (uintptr_t)pTb->Native.paInstructions) / sizeof(IEMNATIVEINSTR);
    if (offNative <= pTb->Native.cInstructions)
    {
# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
        /*
         * Scan debug info for a matching label.
         * Since the debug info should be 100% linear, we can do a binary search here.
         */
        PCIEMTBDBG const pDbgInfo = pSymCtx->pDbgInfo;
        if (pDbgInfo)
        {
            uint32_t const cEntries = pDbgInfo->cEntries;
            uint32_t       idxEnd   = cEntries;
            uint32_t       idxStart = 0;
            for (;;)
            {
                /* Find a NativeOffset record close to the midpoint. */
                uint32_t idx = idxStart + (idxEnd - idxStart) / 2;
                while (idx > idxStart && pDbgInfo->aEntries[idx].Gen.uType != kIemTbDbgEntryType_NativeOffset)
                    idx--;
                if (pDbgInfo->aEntries[idx].Gen.uType != kIemTbDbgEntryType_NativeOffset)
                {
                    idx = idxStart + (idxEnd - idxStart) / 2 + 1;
                    while (idx < idxEnd && pDbgInfo->aEntries[idx].Gen.uType != kIemTbDbgEntryType_NativeOffset)
                        idx++;
                    if (idx >= idxEnd)
                        break;
                }

                /* Do the binary searching thing. */
                if (offNative < pDbgInfo->aEntries[idx].NativeOffset.offNative)
                {
                    if (idx > idxStart)
                        idxEnd = idx;
                    else
                        break;
                }
                else if (offNative > pDbgInfo->aEntries[idx].NativeOffset.offNative)
                {
                    idx += 1;
                    if (idx < idxEnd)
                        idxStart = idx;
                    else
                        break;
                }
                else
                {
                    /* Got a matching offset, scan forward till we hit a label, but
                       stop when the native offset changes. */
                    while (++idx < cEntries)
                        switch (pDbgInfo->aEntries[idx].Gen.uType)
                        {
                            case kIemTbDbgEntryType_Label:
                            {
                                IEMNATIVELABELTYPE const enmLabel = (IEMNATIVELABELTYPE)pDbgInfo->aEntries[idx].Label.enmLabel;
                                const char * const       pszName  = iemNativeGetLabelName(enmLabel);
                                if (enmLabel < kIemNativeLabelType_FirstWithMultipleInstances)
                                    return pszName;
                                RTStrPrintf(pszBuf, cbBuf, "%s_%u", pszName, pDbgInfo->aEntries[idx].Label.uData);
                                return pszBuf;
                            }

                            case kIemTbDbgEntryType_NativeOffset:
                                if (pDbgInfo->aEntries[idx].NativeOffset.offNative != offNative)
                                    return NULL;
                                break;
                        }
                    break;
                }
            }
        }
# endif
    }
# ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
    else
    {
        PCIEMNATIVEPERCHUNKCTX const pChunkCtx = pSymCtx->pCtx;
        if (pChunkCtx)
            for (uint32_t i = 1; i < RT_ELEMENTS(pChunkCtx->apExitLabels); i++)
                if ((PIEMNATIVEINSTR)uAddress == pChunkCtx->apExitLabels[i])
                    return iemNativeGetLabelName((IEMNATIVELABELTYPE)i, true /*fCommonCode*/);
    }
# endif
#endif
    RT_NOREF(pSymCtx, uAddress, pszBuf, cbBuf);
    return NULL;
}
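
/*
 * Note: the search above is a binary search over a sequence in which only
 * some records (kIemTbDbgEntryType_NativeOffset) carry the sort key, so each
 * probe first snaps the midpoint to the nearest such record before comparing.
 * A simplified sketch of just the probe step (IsKeyRecord and paEntries are
 * hypothetical names, for illustration only):
 *
 * @code
 *  uint32_t idx = idxStart + (idxEnd - idxStart) / 2;
 *  while (idx > idxStart && !IsKeyRecord(paEntries[idx]))   // snap left...
 *      idx--;
 *  if (!IsKeyRecord(paEntries[idx]))                        // ...else snap right.
 *  {
 *      idx = idxStart + (idxEnd - idxStart) / 2 + 1;
 *      while (idx < idxEnd && !IsKeyRecord(paEntries[idx]))
 *          idx++;
 *      // if idx reached idxEnd there is no key record left in the range.
 *  }
 * @endcode
 */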

#ifndef VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER

/**
 * @callback_method_impl{FNDISGETSYMBOL}
 */
static DECLCALLBACK(int) iemNativeDisasmGetSymbolCb(PCDISSTATE pDis, uint32_t u32Sel, RTUINTPTR uAddress,
                                                    char *pszBuf, size_t cchBuf, RTINTPTR *poff, void *pvUser)
{
    const char * const pszSym = iemNativeDisasmGetSymbol((PIEMNATIVDISASMSYMCTX)pvUser, uAddress, pszBuf, cchBuf);
    if (pszSym)
    {
        *poff = 0;
        if (pszSym != pszBuf)
            return RTStrCopy(pszBuf, cchBuf, pszSym);
        return VINF_SUCCESS;
    }
    RT_NOREF(pDis, u32Sel);
    return VERR_SYMBOL_NOT_FOUND;
}
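
/*
 * Note: this callback is wired into the disassembly formatter further down
 * in this file, e.g. (as done in iemNativeDisassembleTb below):
 *
 * @code
 *  DISFormatYasmEx(&Dis, szDisBuf, sizeof(szDisBuf),
 *                  DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
 *                  iemNativeDisasmGetSymbolCb, &SymCtx);
 * @endcode
 */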

#else  /* VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER */

/**
 * Annotates an instruction decoded by the capstone disassembler.
 */
static const char *
iemNativeDisasmAnnotateCapstone(PIEMNATIVDISASMSYMCTX pSymCtx, cs_insn const *pInstr, char *pszBuf, size_t cchBuf)
{
# if defined(RT_ARCH_ARM64)
    if (   (pInstr->id >= ARM64_INS_LD1 && pInstr->id < ARM64_INS_LSL)
        || (pInstr->id >= ARM64_INS_ST1 && pInstr->id < ARM64_INS_SUB))
    {
        /* This is a bit crappy, but the disassembler provides incomplete addressing details. */
        AssertCompile(IEMNATIVE_REG_FIXED_PVMCPU == 28 && IEMNATIVE_REG_FIXED_PCPUMCTX == 27);
        char const *psz = strchr(pInstr->op_str, '[');
        if (psz && psz[1] == 'x' && psz[2] == '2' && (psz[3] == '7' || psz[3] == '8'))
        {
            uint32_t const offVCpu = psz[3] == '8' ? 0 : RT_UOFFSETOF(VMCPU, cpum.GstCtx);
            int32_t        off     = -1;
            psz += 4;
            if (*psz == ']')
                off = 0;
            else if (*psz == ',')
            {
                psz = RTStrStripL(psz + 1);
                if (*psz == '#')
                    off = RTStrToInt32(&psz[1]);
                /** @todo deal with index registers and LSL as well... */
            }
            if (off >= 0)
                return iemNativeDbgVCpuOffsetToName(offVCpu + (uint32_t)off);
        }
    }
    else if (pInstr->id == ARM64_INS_B || pInstr->id == ARM64_INS_BL)
    {
        const char *pszAddr = strchr(pInstr->op_str, '#');
        if (pszAddr)
        {
            uint64_t uAddr = RTStrToUInt64(pszAddr + 1);
            if (uAddr != 0)
                return iemNativeDisasmGetSymbol(pSymCtx, uAddr, pszBuf, cchBuf);
        }
    }
# endif
    RT_NOREF(pSymCtx, pInstr, pszBuf, cchBuf);
    return NULL;
}

#endif /* VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER */
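
/*
 * Note: the annotation above works on capstone's textual operands because
 * cs_insn::detail does not carry complete addressing information here.  A
 * minimal sketch of the string-based offset extraction using only standard C
 * (the input literal is a made-up example, not real output):
 *
 * @code
 *  #include <string.h>
 *  #include <stdlib.h>
 *
 *  const char *pszOp = "x0, [x28, #0x3a8]";      // hypothetical op_str
 *  const char *psz   = strchr(pszOp, '[');
 *  long        off   = -1;
 *  if (psz && psz[1] == 'x' && psz[2] == '2' && psz[3] == '8')
 *  {
 *      psz = strchr(psz, '#');
 *      if (psz)
 *          off = strtol(psz + 1, NULL, 0);       // base 0 handles the 0x prefix
 *  }
 *  // off is now 0x3a8, i.e. the byte offset relative to pVCpu (x28).
 * @endcode
 */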


DECLHIDDEN(void) iemNativeDisassembleTb(PVMCPU pVCpu, PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT
{
    AssertReturnVoid((pTb->fFlags & IEMTB_F_TYPE_MASK) == IEMTB_F_TYPE_NATIVE);
#if defined(RT_ARCH_AMD64)
    static const char * const a_apszMarkers[] =
    {
        /*[0]=*/ "unknown0", "CheckCsLim", "ConsiderLimChecking", "CheckOpcodes",
        /*[4]=*/ "PcAfterBranch", "LoadTlbForNewPage", "LoadTlbAfterBranch"
    };
#endif

    char                    szDisBuf[512];
    DISSTATE                Dis;
    PCIEMNATIVEINSTR const  paNative  = pTb->Native.paInstructions;
    uint32_t const          cNative   = pTb->Native.cInstructions;
    uint32_t                offNative = 0;
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    PCIEMTBDBG const        pDbgInfo  = pTb->pDbgInfo;
#endif
    DISCPUMODE              enmGstCpuMode = (pTb->fFlags & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT ? DISCPUMODE_16BIT
                                          : (pTb->fFlags & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT ? DISCPUMODE_32BIT
                                          :                                                            DISCPUMODE_64BIT;
#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    IEMNATIVDISASMSYMCTX    SymCtx        = { pVCpu, pTb, iemExecMemGetTbChunkCtx(pVCpu, pTb), pDbgInfo };
# else
    IEMNATIVDISASMSYMCTX    SymCtx        = { pVCpu, pTb, iemExecMemGetTbChunkCtx(pVCpu, pTb) };
# endif
#elif defined(IEMNATIVE_WITH_TB_DEBUG_INFO)
    IEMNATIVDISASMSYMCTX    SymCtx        = { pVCpu, pTb, pDbgInfo };
#else
    IEMNATIVDISASMSYMCTX    SymCtx        = { pVCpu, pTb };
#endif
#if defined(RT_ARCH_AMD64) && !defined(VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER)
    DISCPUMODE const        enmHstCpuMode = DISCPUMODE_64BIT;
#elif defined(RT_ARCH_ARM64) && !defined(VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER)
    DISCPUMODE const        enmHstCpuMode = DISCPUMODE_ARMV8_A64;
#elif !defined(VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER)
# error "Port me"
#else
    csh                     hDisasm       = ~(size_t)0;
# if defined(RT_ARCH_AMD64)
    cs_err                  rcCs          = cs_open(CS_ARCH_X86, CS_MODE_LITTLE_ENDIAN | CS_MODE_64, &hDisasm);
# elif defined(RT_ARCH_ARM64)
    cs_err                  rcCs          = cs_open(CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN, &hDisasm);
# else
# error "Port me"
# endif
    AssertMsgReturnVoid(rcCs == CS_ERR_OK, ("%d (%#x)\n", rcCs, rcCs));

    //rcCs = cs_option(hDisasm, CS_OPT_DETAIL, CS_OPT_ON); - not needed as pInstr->detail doesn't provide full memory detail.
    //Assert(rcCs == CS_ERR_OK);
#endif

    /*
     * Print TB info.
     */
    pHlp->pfnPrintf(pHlp,
                    "pTb=%p: GCPhysPc=%RGp (%%%RGv) cInstructions=%u LB %#x cRanges=%u\n"
                    "pTb=%p: cUsed=%u msLastUsed=%u fFlags=%#010x %s\n",
                    pTb, pTb->GCPhysPc,
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
                    pTb->pDbgInfo ? pTb->pDbgInfo->FlatPc : RTGCPTR_MAX,
#else
                    pTb->FlatPc,
#endif
                    pTb->cInstructions, pTb->cbOpcodes, pTb->cRanges,
                    pTb, pTb->cUsed, pTb->msLastUsed, pTb->fFlags, iemTbFlagsToString(pTb->fFlags, szDisBuf, sizeof(szDisBuf)));
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    if (pDbgInfo && pDbgInfo->cEntries > 1)
    {
        Assert(pDbgInfo->aEntries[0].Gen.uType == kIemTbDbgEntryType_NativeOffset);

        /*
         * This disassembly is driven by the debug info which follows the native
         * code and indicates when it starts with the next guest instructions,
         * where labels are and such things.
         */
        uint32_t        idxThreadedCall  = 0;
        uint32_t        idxGuestInstr    = 0;
        uint32_t        fExec            = pTb->fFlags & UINT32_C(0x00ffffff);
        uint8_t         idxRange         = UINT8_MAX;
        uint8_t const   cRanges          = RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges));
        uint32_t        offRange         = 0;
        uint32_t        offOpcodes       = 0;
        uint32_t const  cbOpcodes        = pTb->cbOpcodes;
        RTGCPHYS        GCPhysPc         = pTb->GCPhysPc;
        uint32_t const  cDbgEntries      = pDbgInfo->cEntries;
        uint32_t        iDbgEntry        = 1;
        uint32_t        offDbgNativeNext = pDbgInfo->aEntries[0].NativeOffset.offNative;

        while (offNative < cNative)
        {
            /* If we're at or have passed the point where the next chunk of debug
               info starts, process it. */
            if (offDbgNativeNext <= offNative)
            {
                offDbgNativeNext = UINT32_MAX;
                for (; iDbgEntry < cDbgEntries; iDbgEntry++)
                {
                    switch (pDbgInfo->aEntries[iDbgEntry].Gen.uType)
                    {
                        case kIemTbDbgEntryType_GuestInstruction:
                        {
                            /* Did the exec flag change? */
                            if (fExec != pDbgInfo->aEntries[iDbgEntry].GuestInstruction.fExec)
                            {
                                pHlp->pfnPrintf(pHlp,
                                                "  fExec change %#08x -> %#08x %s\n",
                                                fExec, pDbgInfo->aEntries[iDbgEntry].GuestInstruction.fExec,
                                                iemTbFlagsToString(pDbgInfo->aEntries[iDbgEntry].GuestInstruction.fExec,
                                                                   szDisBuf, sizeof(szDisBuf)));
                                fExec         = pDbgInfo->aEntries[iDbgEntry].GuestInstruction.fExec;
                                enmGstCpuMode = (fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT ? DISCPUMODE_16BIT
                                              : (fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT ? DISCPUMODE_32BIT
                                              :                                                      DISCPUMODE_64BIT;
                            }

                            /* New opcode range?  We need to fend off a spurious debug info entry here for cases
                               where the compilation was aborted before the opcode was recorded and the actual
                               instruction was translated to a threaded call.  This may happen when we run out
                               of ranges, or when some complicated interrupts/FFs are found to be pending or
                               similar.  So, we just deal with it here rather than in the compiler code as it
                               is a lot simpler to do here. */
                            if (   idxRange == UINT8_MAX
                                || idxRange >= cRanges
                                || offRange >= pTb->aRanges[idxRange].cbOpcodes)
                            {
                                idxRange += 1;
                                if (idxRange < cRanges)
                                    offRange = !idxRange ? 0 : offRange - pTb->aRanges[idxRange - 1].cbOpcodes;
                                else
                                    continue;
                                Assert(offOpcodes == pTb->aRanges[idxRange].offOpcodes + offRange);
                                GCPhysPc = pTb->aRanges[idxRange].offPhysPage
                                         + (pTb->aRanges[idxRange].idxPhysPage == 0
                                            ? pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK
                                            : pTb->aGCPhysPages[pTb->aRanges[idxRange].idxPhysPage - 1]);
                                pHlp->pfnPrintf(pHlp, "  Range #%u: GCPhysPc=%RGp LB %#x [idxPg=%d]\n",
                                                idxRange, GCPhysPc, pTb->aRanges[idxRange].cbOpcodes,
                                                pTb->aRanges[idxRange].idxPhysPage);
                                GCPhysPc += offRange;
                            }

                            /* Disassemble the instruction. */
                            //uint8_t const cbInstrMax = RT_MIN(pTb->aRanges[idxRange].cbOpcodes - offRange, 15);
                            uint8_t const cbInstrMax = RT_MIN(cbOpcodes - offOpcodes, 15);
                            uint32_t      cbInstr    = 1;
                            int rc = DISInstrWithPrefetchedBytes(GCPhysPc, enmGstCpuMode, DISOPTYPE_ALL,
                                                                 &pTb->pabOpcodes[offOpcodes], cbInstrMax,
                                                                 iemNativeDisasReadBytesDummy, NULL, &Dis, &cbInstr);
                            if (RT_SUCCESS(rc))
                            {
                                size_t cch = DISFormatYasmEx(&Dis, szDisBuf, sizeof(szDisBuf),
                                                             DIS_FMT_FLAGS_BYTES_WIDTH_MAKE(10) | DIS_FMT_FLAGS_BYTES_LEFT
                                                             | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
                                                             NULL /*pfnGetSymbol*/, NULL /*pvUser*/);

                                static unsigned const s_offMarker  = 55;
                                static char const     s_szMarker[] = " ; <--- guest";
                                if (cch < s_offMarker)
                                {
                                    memset(&szDisBuf[cch], ' ', s_offMarker - cch);
                                    cch = s_offMarker;
                                }
                                if (cch + sizeof(s_szMarker) <= sizeof(szDisBuf))
                                    memcpy(&szDisBuf[cch], s_szMarker, sizeof(s_szMarker));

                                pHlp->pfnPrintf(pHlp, "\n  %%%%%RGp: %s #%u\n", GCPhysPc, szDisBuf, idxGuestInstr);
                            }
                            else
                            {
                                pHlp->pfnPrintf(pHlp, "\n  %%%%%RGp: %.*Rhxs - guest disassembly failure %Rrc\n",
                                                GCPhysPc, cbInstrMax, &pTb->pabOpcodes[offOpcodes], rc);
                                cbInstr = 1;
                            }
                            idxGuestInstr++;
                            GCPhysPc   += cbInstr;
                            offOpcodes += cbInstr;
                            offRange   += cbInstr;
                            continue;
                        }

                        case kIemTbDbgEntryType_ThreadedCall:
                            pHlp->pfnPrintf(pHlp,
                                            "  Call #%u to %s (%u args) - %s\n",
                                            idxThreadedCall,
                                            g_apszIemThreadedFunctions[pDbgInfo->aEntries[iDbgEntry].ThreadedCall.enmCall],
                                            g_acIemThreadedFunctionUsedArgs[pDbgInfo->aEntries[iDbgEntry].ThreadedCall.enmCall],
                                            pDbgInfo->aEntries[iDbgEntry].ThreadedCall.fRecompiled ? "recompiled" : "todo");
                            idxThreadedCall++;
                            continue;

                        case kIemTbDbgEntryType_GuestRegShadowing:
                        {
                            PCIEMTBDBGENTRY const pEntry    = &pDbgInfo->aEntries[iDbgEntry];
                            const char * const    pszGstReg = g_aGstShadowInfo[pEntry->GuestRegShadowing.idxGstReg].pszName;
                            if (pEntry->GuestRegShadowing.idxHstReg == UINT8_MAX)
                                pHlp->pfnPrintf(pHlp, "  Guest register %s != host register %s\n", pszGstReg,
                                                g_apszIemNativeHstRegNames[pEntry->GuestRegShadowing.idxHstRegPrev]);
                            else if (pEntry->GuestRegShadowing.idxHstRegPrev == UINT8_MAX)
                                pHlp->pfnPrintf(pHlp, "  Guest register %s == host register %s\n", pszGstReg,
                                                g_apszIemNativeHstRegNames[pEntry->GuestRegShadowing.idxHstReg]);
                            else
                                pHlp->pfnPrintf(pHlp, "  Guest register %s == host register %s (previously in %s)\n", pszGstReg,
                                                g_apszIemNativeHstRegNames[pEntry->GuestRegShadowing.idxHstReg],
                                                g_apszIemNativeHstRegNames[pEntry->GuestRegShadowing.idxHstRegPrev]);
                            continue;
                        }

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
                        case kIemTbDbgEntryType_GuestSimdRegShadowing:
                        {
                            PCIEMTBDBGENTRY const pEntry    = &pDbgInfo->aEntries[iDbgEntry];
                            const char * const    pszGstReg = g_aGstSimdShadowInfo[pEntry->GuestSimdRegShadowing.idxGstSimdReg].pszName;
                            if (pEntry->GuestSimdRegShadowing.idxHstSimdReg == UINT8_MAX)
                                pHlp->pfnPrintf(pHlp, "  Guest SIMD register %s != host SIMD register %s\n", pszGstReg,
                                                g_apszIemNativeHstSimdRegNames[pEntry->GuestSimdRegShadowing.idxHstSimdRegPrev]);
                            else if (pEntry->GuestSimdRegShadowing.idxHstSimdRegPrev == UINT8_MAX)
                                pHlp->pfnPrintf(pHlp, "  Guest SIMD register %s == host SIMD register %s\n", pszGstReg,
                                                g_apszIemNativeHstSimdRegNames[pEntry->GuestSimdRegShadowing.idxHstSimdReg]);
                            else
                                pHlp->pfnPrintf(pHlp, "  Guest SIMD register %s == host SIMD register %s (previously in %s)\n", pszGstReg,
                                                g_apszIemNativeHstSimdRegNames[pEntry->GuestSimdRegShadowing.idxHstSimdReg],
                                                g_apszIemNativeHstSimdRegNames[pEntry->GuestSimdRegShadowing.idxHstSimdRegPrev]);
                            continue;
                        }
#endif

                        case kIemTbDbgEntryType_Label:
                        {
                            const char *pszName = iemNativeGetLabelName((IEMNATIVELABELTYPE)pDbgInfo->aEntries[iDbgEntry].Label.enmLabel);
                            if (pDbgInfo->aEntries[iDbgEntry].Label.enmLabel >= kIemNativeLabelType_FirstWithMultipleInstances)
                            {
                                const char *pszComment = pDbgInfo->aEntries[iDbgEntry].Label.enmLabel == kIemNativeLabelType_Else
                                                       ? " ; regs state restored pre-if-block" : "";
                                pHlp->pfnPrintf(pHlp, "  %s_%u:%s\n", pszName, pDbgInfo->aEntries[iDbgEntry].Label.uData, pszComment);
                            }
                            else
                                pHlp->pfnPrintf(pHlp, "  %s:\n", pszName);
                            continue;
                        }

                        case kIemTbDbgEntryType_NativeOffset:
                            offDbgNativeNext = pDbgInfo->aEntries[iDbgEntry].NativeOffset.offNative;
                            Assert(offDbgNativeNext >= offNative);
                            break;

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
                        case kIemTbDbgEntryType_DelayedPcUpdate:
                            pHlp->pfnPrintf(pHlp, "  Updating guest PC value by %u (cInstrSkipped=%u)\n",
                                            pDbgInfo->aEntries[iDbgEntry].DelayedPcUpdate.offPc,
                                            pDbgInfo->aEntries[iDbgEntry].DelayedPcUpdate.cInstrSkipped);
                            continue;
#endif

#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
                        case kIemTbDbgEntryType_GuestRegDirty:
                        {
                            PCIEMTBDBGENTRY const pEntry    = &pDbgInfo->aEntries[iDbgEntry];
                            const char * const    pszGstReg = pEntry->GuestRegDirty.fSimdReg
                                                            ? g_aGstSimdShadowInfo[pEntry->GuestRegDirty.idxGstReg].pszName
                                                            : g_aGstShadowInfo[pEntry->GuestRegDirty.idxGstReg].pszName;
                            const char * const    pszHstReg = pEntry->GuestRegDirty.fSimdReg
                                                            ? g_apszIemNativeHstSimdRegNames[pEntry->GuestRegDirty.idxHstReg]
                                                            : g_apszIemNativeHstRegNames[pEntry->GuestRegDirty.idxHstReg];
                            pHlp->pfnPrintf(pHlp, "  Guest register %s (shadowed by %s) is now marked dirty (intent)\n",
                                            pszGstReg, pszHstReg);
                            continue;
                        }

                        case kIemTbDbgEntryType_GuestRegWriteback:
                            pHlp->pfnPrintf(pHlp, "  Writing dirty %s registers (gst %#RX64)\n",
                                            pDbgInfo->aEntries[iDbgEntry].GuestRegWriteback.fSimdReg ? "SIMD" : "general",
                                            (uint64_t)pDbgInfo->aEntries[iDbgEntry].GuestRegWriteback.fGstReg
                                            << (pDbgInfo->aEntries[iDbgEntry].GuestRegWriteback.cShift * 25));
                            continue;
#endif

                        default:
                            AssertFailed();
                    }
                    iDbgEntry++;
                    break;
                }
            }

            /*
             * Disassemble the next native instruction.
             */
            PCIEMNATIVEINSTR const pNativeCur = &paNative[offNative];
# ifndef VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER
            uint32_t  cbInstr = sizeof(paNative[0]);
            int const rc      = DISInstr(pNativeCur, enmHstCpuMode, &Dis, &cbInstr);
            if (RT_SUCCESS(rc))
            {
# if defined(RT_ARCH_AMD64)
                if (Dis.pCurInstr->uOpcode == OP_NOP && cbInstr == 7) /* iemNativeEmitMarker */
                {
                    uint32_t const uInfo = *(uint32_t const *)&Dis.Instr.ab[3];
                    if (RT_HIWORD(uInfo) < kIemThreadedFunc_End)
                        pHlp->pfnPrintf(pHlp, "    %p: nop ; marker: call #%u to %s (%u args) - %s\n",
                                        pNativeCur, uInfo & 0x7fff, g_apszIemThreadedFunctions[RT_HIWORD(uInfo)],
                                        g_acIemThreadedFunctionUsedArgs[RT_HIWORD(uInfo)],
                                        uInfo & 0x8000 ? "recompiled" : "todo");
                    else if ((uInfo & ~RT_BIT_32(31)) < RT_ELEMENTS(a_apszMarkers))
                        pHlp->pfnPrintf(pHlp, "    %p: nop ; marker: %s\n", pNativeCur, a_apszMarkers[uInfo & ~RT_BIT_32(31)]);
                    else
                        pHlp->pfnPrintf(pHlp, "    %p: nop ; unknown marker: %#x (%d)\n", pNativeCur, uInfo, uInfo);
                }
                else
# endif
                {
                    const char *pszAnnotation = NULL;
# ifdef RT_ARCH_AMD64
                    DISFormatYasmEx(&Dis, szDisBuf, sizeof(szDisBuf),
                                    DIS_FMT_FLAGS_BYTES_WIDTH_MAKE(10) | DIS_FMT_FLAGS_BYTES_LEFT
                                    | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
                                    iemNativeDisasmGetSymbolCb, &SymCtx);
                    PCDISOPPARAM pMemOp;
                    if (DISUSE_IS_EFFECTIVE_ADDR(Dis.aParams[0].fUse))
                        pMemOp = &Dis.aParams[0];
                    else if (DISUSE_IS_EFFECTIVE_ADDR(Dis.aParams[1].fUse))
                        pMemOp = &Dis.aParams[1];
                    else if (DISUSE_IS_EFFECTIVE_ADDR(Dis.aParams[2].fUse))
                        pMemOp = &Dis.aParams[2];
                    else
                        pMemOp = NULL;
                    if (   pMemOp
                        && pMemOp->x86.Base.idxGenReg == IEMNATIVE_REG_FIXED_PVMCPU
                        && (pMemOp->fUse & (DISUSE_BASE | DISUSE_REG_GEN64)) == (DISUSE_BASE | DISUSE_REG_GEN64))
                        pszAnnotation = iemNativeDbgVCpuOffsetToName(pMemOp->fUse & DISUSE_DISPLACEMENT32
                                                                     ? pMemOp->x86.uDisp.u32 : pMemOp->x86.uDisp.u8);

# elif defined(RT_ARCH_ARM64)
                    DISFormatArmV8Ex(&Dis, szDisBuf, sizeof(szDisBuf),
                                     DIS_FMT_FLAGS_BYTES_LEFT | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
                                     iemNativeDisasmGetSymbolCb, &SymCtx);
# else
# error "Port me"
# endif
                    if (pszAnnotation)
                    {
                        static unsigned const s_offAnnotation = 55;
                        size_t const          cchAnnotation   = strlen(pszAnnotation);
                        size_t                cchDis          = strlen(szDisBuf);
                        if (RT_MAX(cchDis, s_offAnnotation) + sizeof(" ; ") + cchAnnotation <= sizeof(szDisBuf))
                        {
                            if (cchDis < s_offAnnotation)
                            {
                                memset(&szDisBuf[cchDis], ' ', s_offAnnotation - cchDis);
                                cchDis = s_offAnnotation;
                            }
                            szDisBuf[cchDis++] = ' ';
                            szDisBuf[cchDis++] = ';';
                            szDisBuf[cchDis++] = ' ';
                            memcpy(&szDisBuf[cchDis], pszAnnotation, cchAnnotation + 1);
                        }
                    }
                    pHlp->pfnPrintf(pHlp, "    %p: %s\n", pNativeCur, szDisBuf);
                }
            }
            else
            {
# if defined(RT_ARCH_AMD64)
                pHlp->pfnPrintf(pHlp, "    %p:  %.*Rhxs - disassembly failure %Rrc\n",
                                pNativeCur, RT_MIN(cNative - offNative, 16), pNativeCur, rc);
# elif defined(RT_ARCH_ARM64)
                pHlp->pfnPrintf(pHlp, "    %p:  %#010RX32 - disassembly failure %Rrc\n", pNativeCur, *pNativeCur, rc);
# else
# error "Port me"
# endif
                cbInstr = sizeof(paNative[0]);
            }
            offNative += cbInstr / sizeof(paNative[0]);

# else  /* VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER */
            cs_insn *pInstr;
            size_t   cInstrs = cs_disasm(hDisasm, (const uint8_t *)pNativeCur, (cNative - offNative) * sizeof(*pNativeCur),
                                         (uintptr_t)pNativeCur, 1, &pInstr);
            if (cInstrs > 0)
            {
                Assert(cInstrs == 1);
                const char * const pszAnnotation = iemNativeDisasmAnnotateCapstone(&SymCtx, pInstr, szDisBuf, sizeof(szDisBuf));
                size_t const       cchOp         = strlen(pInstr->op_str);
# if defined(RT_ARCH_AMD64)
                if (pszAnnotation)
                    pHlp->pfnPrintf(pHlp, "    %p: %.*Rhxs %-7s %s%*s ; %s\n",
                                    pNativeCur, pInstr->size, pNativeCur, pInstr->mnemonic, pInstr->op_str,
                                    cchOp < 55 ? 55 - cchOp : 0, "", pszAnnotation);
                else
                    pHlp->pfnPrintf(pHlp, "    %p: %.*Rhxs %-7s %s\n",
                                    pNativeCur, pInstr->size, pNativeCur, pInstr->mnemonic, pInstr->op_str);

# else
                if (pszAnnotation)
                    pHlp->pfnPrintf(pHlp, "    %p: %#010RX32 %-7s %s%*s ; %s\n",
                                    pNativeCur, *pNativeCur, pInstr->mnemonic, pInstr->op_str,
                                    cchOp < 55 ? 55 - cchOp : 0, "", pszAnnotation);
                else
                    pHlp->pfnPrintf(pHlp, "    %p: %#010RX32 %-7s %s\n",
                                    pNativeCur, *pNativeCur, pInstr->mnemonic, pInstr->op_str);
# endif
                offNative += pInstr->size / sizeof(*pNativeCur);
                cs_free(pInstr, cInstrs);
            }
            else
            {
# if defined(RT_ARCH_AMD64)
                pHlp->pfnPrintf(pHlp, "    %p:  %.*Rhxs - disassembly failure %d\n",
                                pNativeCur, RT_MIN(cNative - offNative, 16), pNativeCur, cs_errno(hDisasm));
# else
                pHlp->pfnPrintf(pHlp, "    %p:  %#010RX32 - disassembly failure %d\n", pNativeCur, *pNativeCur, cs_errno(hDisasm));
# endif
                offNative++;
            }
# endif /* VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER */
        }
    }
    else
#endif /* IEMNATIVE_WITH_TB_DEBUG_INFO */
    {
        /*
         * No debug info, just disassemble the x86 code and then the native code.
         *
         * First the guest code:
         */
        for (unsigned i = 0; i < pTb->cRanges; i++)
        {
            RTGCPHYS GCPhysPc = pTb->aRanges[i].offPhysPage
                              + (pTb->aRanges[i].idxPhysPage == 0
                                 ? pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK
                                 : pTb->aGCPhysPages[pTb->aRanges[i].idxPhysPage - 1]);
            pHlp->pfnPrintf(pHlp, "  Range #%u: GCPhysPc=%RGp LB %#x [idxPg=%d]\n",
                            i, GCPhysPc, pTb->aRanges[i].cbOpcodes, pTb->aRanges[i].idxPhysPage);
            unsigned       off       = pTb->aRanges[i].offOpcodes;
            /** @todo this ain't working when crossing pages! */
            unsigned const cbOpcodes = pTb->aRanges[i].cbOpcodes + off;
            while (off < cbOpcodes)
            {
                uint32_t cbInstr = 1;
                int rc = DISInstrWithPrefetchedBytes(GCPhysPc, enmGstCpuMode, DISOPTYPE_ALL,
                                                     &pTb->pabOpcodes[off], cbOpcodes - off,
                                                     iemNativeDisasReadBytesDummy, NULL, &Dis, &cbInstr);
                if (RT_SUCCESS(rc))
                {
                    DISFormatYasmEx(&Dis, szDisBuf, sizeof(szDisBuf),
                                    DIS_FMT_FLAGS_BYTES_WIDTH_MAKE(10) | DIS_FMT_FLAGS_BYTES_LEFT
                                    | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
                                    NULL /*pfnGetSymbol*/, NULL /*pvUser*/);
                    pHlp->pfnPrintf(pHlp, "    %%%%%RGp: %s\n", GCPhysPc, szDisBuf);
                    GCPhysPc += cbInstr;
                    off      += cbInstr;
                }
                else
                {
                    pHlp->pfnPrintf(pHlp, "    %%%%%RGp: %.*Rhxs - disassembly failure %Rrc\n",
                                    GCPhysPc, cbOpcodes - off, &pTb->pabOpcodes[off], rc);
                    break;
                }
            }
        }

        /*
         * Then the native code:
         */
        pHlp->pfnPrintf(pHlp, "  Native code %p L %#x\n", paNative, cNative);
        while (offNative < cNative)
        {
            PCIEMNATIVEINSTR const pNativeCur = &paNative[offNative];
# ifndef VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER
            uint32_t  cbInstr = sizeof(paNative[0]);
            int const rc      = DISInstr(pNativeCur, enmHstCpuMode, &Dis, &cbInstr);
            if (RT_SUCCESS(rc))
            {
# if defined(RT_ARCH_AMD64)
                if (Dis.pCurInstr->uOpcode == OP_NOP && cbInstr == 7) /* iemNativeEmitMarker */
                {
                    uint32_t const uInfo = *(uint32_t const *)&Dis.Instr.ab[3];
                    if (RT_HIWORD(uInfo) < kIemThreadedFunc_End)
                        pHlp->pfnPrintf(pHlp, "\n    %p: nop ; marker: call #%u to %s (%u args) - %s\n",
                                        pNativeCur, uInfo & 0x7fff, g_apszIemThreadedFunctions[RT_HIWORD(uInfo)],
                                        g_acIemThreadedFunctionUsedArgs[RT_HIWORD(uInfo)],
                                        uInfo & 0x8000 ? "recompiled" : "todo");
                    else if ((uInfo & ~RT_BIT_32(31)) < RT_ELEMENTS(a_apszMarkers))
                        pHlp->pfnPrintf(pHlp, "    %p: nop ; marker: %s\n", pNativeCur, a_apszMarkers[uInfo & ~RT_BIT_32(31)]);
                    else
                        pHlp->pfnPrintf(pHlp, "    %p: nop ; unknown marker: %#x (%d)\n", pNativeCur, uInfo, uInfo);
                }
                else
# endif
                {
# ifdef RT_ARCH_AMD64
                    DISFormatYasmEx(&Dis, szDisBuf, sizeof(szDisBuf),
                                    DIS_FMT_FLAGS_BYTES_WIDTH_MAKE(10) | DIS_FMT_FLAGS_BYTES_LEFT
                                    | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
                                    iemNativeDisasmGetSymbolCb, &SymCtx);
# elif defined(RT_ARCH_ARM64)
                    DISFormatArmV8Ex(&Dis, szDisBuf, sizeof(szDisBuf),
                                     DIS_FMT_FLAGS_BYTES_LEFT | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
                                     iemNativeDisasmGetSymbolCb, &SymCtx);
# else
# error "Port me"
# endif
                    pHlp->pfnPrintf(pHlp, "    %p: %s\n", pNativeCur, szDisBuf);
                }
            }
            else
            {
# if defined(RT_ARCH_AMD64)
                pHlp->pfnPrintf(pHlp, "    %p:  %.*Rhxs - disassembly failure %Rrc\n",
                                pNativeCur, RT_MIN(cNative - offNative, 16), pNativeCur, rc);
# else
                pHlp->pfnPrintf(pHlp, "    %p:  %#010RX32 - disassembly failure %Rrc\n", pNativeCur, *pNativeCur, rc);
# endif
                cbInstr = sizeof(paNative[0]);
            }
            offNative += cbInstr / sizeof(paNative[0]);

# else  /* VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER */
            cs_insn *pInstr;
            size_t   cInstrs = cs_disasm(hDisasm, (const uint8_t *)pNativeCur, (cNative - offNative) * sizeof(*pNativeCur),
                                         (uintptr_t)pNativeCur, 1, &pInstr);
            if (cInstrs > 0)
            {
                Assert(cInstrs == 1);
                const char * const pszAnnotation = iemNativeDisasmAnnotateCapstone(&SymCtx, pInstr, szDisBuf, sizeof(szDisBuf));
                size_t const       cchOp         = strlen(pInstr->op_str);
# if defined(RT_ARCH_AMD64)
                if (pszAnnotation)
                    pHlp->pfnPrintf(pHlp, "    %p: %.*Rhxs %-7s %s%*s ; %s\n",
                                    pNativeCur, pInstr->size, pNativeCur, pInstr->mnemonic, pInstr->op_str,
                                    cchOp < 55 ? 55 - cchOp : 0, "", pszAnnotation);
                else
                    pHlp->pfnPrintf(pHlp, "    %p: %.*Rhxs %-7s %s\n",
                                    pNativeCur, pInstr->size, pNativeCur, pInstr->mnemonic, pInstr->op_str);

# else
                if (pszAnnotation)
                    pHlp->pfnPrintf(pHlp, "    %p: %#010RX32 %-7s %s%*s ; %s\n",
                                    pNativeCur, *pNativeCur, pInstr->mnemonic, pInstr->op_str,
                                    cchOp < 55 ? 55 - cchOp : 0, "", pszAnnotation);
                else
                    pHlp->pfnPrintf(pHlp, "    %p: %#010RX32 %-7s %s\n",
                                    pNativeCur, *pNativeCur, pInstr->mnemonic, pInstr->op_str);
# endif
                offNative += pInstr->size / sizeof(*pNativeCur);
                cs_free(pInstr, cInstrs);
            }
            else
            {
# if defined(RT_ARCH_AMD64)
                pHlp->pfnPrintf(pHlp, "    %p:  %.*Rhxs - disassembly failure %d\n",
                                pNativeCur, RT_MIN(cNative - offNative, 16), pNativeCur, cs_errno(hDisasm));
# else
                pHlp->pfnPrintf(pHlp, "    %p:  %#010RX32 - disassembly failure %d\n", pNativeCur, *pNativeCur, cs_errno(hDisasm));
# endif
                offNative++;
            }
# endif /* VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER */
        }
    }

#ifdef VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER
    /* Cleanup. */
    cs_close(&hDisasm);
#endif
}
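
/*
 * Note: iemNativeDisassembleTb is typically driven through a DBGF info
 * handler, which is why the output sink is abstracted behind PCDBGFINFOHLP.
 * A hedged sketch of a direct invocation (assuming a valid pVCpu, a native
 * pTb, and the DBGF release-log helper):
 *
 * @code
 *  PCDBGFINFOHLP pHlp = DBGFR3InfoLogRelHlp();
 *  if ((pTb->fFlags & IEMTB_F_TYPE_MASK) == IEMTB_F_TYPE_NATIVE)
 *      iemNativeDisassembleTb(pVCpu, pTb, pHlp);
 * @endcode
 */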


#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE

/** Emit alignment padding between labels / functions. */
DECL_INLINE_THROW(uint32_t)
iemNativeRecompileEmitAlignmentPadding(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fAlignMask)
{
    if (off & fAlignMask)
    {
        PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, fAlignMask + 1);
        while (off & fAlignMask)
# if defined(RT_ARCH_AMD64)
            pCodeBuf[off++] = 0xcc;
# elif defined(RT_ARCH_ARM64)
            pCodeBuf[off++] = Armv8A64MkInstrBrk(0xcccc);
# else
#  error "port me"
# endif
    }
    return off;
}
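
/*
 * Note on the mask math above: fAlignMask is "alignment minus one" in units
 * of IEMNATIVEINSTR elements, so (off & fAlignMask) == 0 iff off is aligned.
 * Worked example with fAlignMask = 15 (AMD64: 16-byte alignment, one byte
 * per element):
 *
 * @code
 *  uint32_t off = 0x1d;        // 0x1d & 15 == 13, i.e. not 16-aligned
 *  while (off & 15)
 *      off++;                  // pads three filler bytes (0xcc)...
 *  // off == 0x20              // ...reaching the next 16-byte boundary.
 * @endcode
 */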


/**
 * Called when a new chunk is allocated to emit common per-chunk code.
 *
 * Allocates a per-chunk context directly from the chunk itself and places the
 * common code there.
 *
 * @returns Pointer to the chunk context start.
 * @param   pVCpu       The cross context virtual CPU structure of the calling
 *                      thread.
 * @param   idxChunk    The index of the chunk being added and requiring a
 *                      common code context.
 */
DECLHIDDEN(PCIEMNATIVEPERCHUNKCTX) iemNativeRecompileAttachExecMemChunkCtx(PVMCPU pVCpu, uint32_t idxChunk)
{
    /*
     * Allocate a new recompiler state (since we're likely to be called while
     * the default one is fully loaded already with a recompiled TB).
     *
     * This is a bit of overkill, but this isn't a frequently used code path.
     */
    PIEMRECOMPILERSTATE pReNative = iemNativeInit(pVCpu, NULL);
    AssertReturn(pReNative, NULL);

# if defined(RT_ARCH_AMD64)
    uint32_t const fAlignMask = 15;
# elif defined(RT_ARCH_ARM64)
    uint32_t const fAlignMask = 31 / 4;
# else
#  error "port me"
# endif
    uint32_t aoffLabels[kIemNativeLabelType_LastTbExit + 1] = {0};
    int      rc  = VINF_SUCCESS;
    uint32_t off = 0;

    IEMNATIVE_TRY_SETJMP(pReNative, rc)
    {
        /*
         * Emit the epilog code.
         */
        aoffLabels[kIemNativeLabelType_Return] = off;
        off = iemNativeEmitCoreEpilog(pReNative, off);

        /*
         * Generate special jump labels.  All of these get a copy of the epilog code.
         */
        static struct
        {
            IEMNATIVELABELTYPE enmExitReason;
            uint32_t (*pfnEmitCore)(PIEMRECOMPILERSTATE pReNative, uint32_t off);
        } const s_aSpecialWithEpilogs[] =
        {
            { kIemNativeLabelType_NonZeroRetOrPassUp,   iemNativeEmitCoreRcFiddling },
            { kIemNativeLabelType_ReturnBreak,          iemNativeEmitCoreReturnBreak },
            { kIemNativeLabelType_ReturnBreakFF,        iemNativeEmitCoreReturnBreakFF },
            { kIemNativeLabelType_ReturnWithFlags,      iemNativeEmitCoreReturnWithFlags },
        };
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aSpecialWithEpilogs); i++)
        {
            off = iemNativeRecompileEmitAlignmentPadding(pReNative, off, fAlignMask);
            Assert(aoffLabels[s_aSpecialWithEpilogs[i].enmExitReason] == 0);
            aoffLabels[s_aSpecialWithEpilogs[i].enmExitReason] = off;
            off = s_aSpecialWithEpilogs[i].pfnEmitCore(pReNative, off);
            off = iemNativeEmitCoreEpilog(pReNative, off);
        }

        /*
         * Do what iemNativeEmitReturnBreakViaLookup does.
         */
        static struct
        {
            IEMNATIVELABELTYPE enmExitReason;
            uintptr_t          pfnHelper;
        } const s_aViaLookup[] =
        {
            { kIemNativeLabelType_ReturnBreakViaLookup,
              (uintptr_t)iemNativeHlpReturnBreakViaLookup<false /*a_fWithIrqCheck*/> },
            { kIemNativeLabelType_ReturnBreakViaLookupWithIrq,
              (uintptr_t)iemNativeHlpReturnBreakViaLookup<true /*a_fWithIrqCheck*/> },
            { kIemNativeLabelType_ReturnBreakViaLookupWithTlb,
              (uintptr_t)iemNativeHlpReturnBreakViaLookupWithTlb<false /*a_fWithIrqCheck*/> },
            { kIemNativeLabelType_ReturnBreakViaLookupWithTlbAndIrq,
              (uintptr_t)iemNativeHlpReturnBreakViaLookupWithTlb<true /*a_fWithIrqCheck*/> },
        };
        uint32_t const offReturnBreak = aoffLabels[kIemNativeLabelType_ReturnBreak]; Assert(offReturnBreak != 0);
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aViaLookup); i++)
        {
            off = iemNativeRecompileEmitAlignmentPadding(pReNative, off, fAlignMask);
            Assert(aoffLabels[s_aViaLookup[i].enmExitReason] == 0);
            aoffLabels[s_aViaLookup[i].enmExitReason] = off;
            off = iemNativeEmitCoreViaLookupDoOne(pReNative, off, offReturnBreak, s_aViaLookup[i].pfnHelper);
        }

        /*
         * Generate simple TB tail labels that just call a helper with a pVCpu
         * argument and either return or longjmp/throw a non-zero status.
         */
        typedef IEM_DECL_NATIVE_HLP_PTR(int, PFNIEMNATIVESIMPLETAILLABELCALL,(PVMCPUCC pVCpu));
        static struct
        {
            IEMNATIVELABELTYPE              enmExitReason;
            bool                            fWithEpilog;
            PFNIEMNATIVESIMPLETAILLABELCALL pfnCallback;
        } const s_aSimpleTailLabels[] =
        {
            { kIemNativeLabelType_RaiseDe,              false, iemNativeHlpExecRaiseDe },
            { kIemNativeLabelType_RaiseUd,              false, iemNativeHlpExecRaiseUd },
            { kIemNativeLabelType_RaiseSseRelated,      false, iemNativeHlpExecRaiseSseRelated },
            { kIemNativeLabelType_RaiseAvxRelated,      false, iemNativeHlpExecRaiseAvxRelated },
            { kIemNativeLabelType_RaiseSseAvxFpRelated, false, iemNativeHlpExecRaiseSseAvxFpRelated },
            { kIemNativeLabelType_RaiseNm,              false, iemNativeHlpExecRaiseNm },
            { kIemNativeLabelType_RaiseGp0,             false, iemNativeHlpExecRaiseGp0 },
            { kIemNativeLabelType_RaiseMf,              false, iemNativeHlpExecRaiseMf },
            { kIemNativeLabelType_RaiseXf,              false, iemNativeHlpExecRaiseXf },
            { kIemNativeLabelType_ObsoleteTb,           true,  iemNativeHlpObsoleteTb },
            { kIemNativeLabelType_NeedCsLimChecking,    true,  iemNativeHlpNeedCsLimChecking },
            { kIemNativeLabelType_CheckBranchMiss,      true,  iemNativeHlpCheckBranchMiss },
        };
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aSimpleTailLabels); i++)
        {
            off = iemNativeRecompileEmitAlignmentPadding(pReNative, off, fAlignMask);
            Assert(!aoffLabels[s_aSimpleTailLabels[i].enmExitReason]);
            aoffLabels[s_aSimpleTailLabels[i].enmExitReason] = off;

            /* int pfnCallback(PVMCPUCC pVCpu) */
            off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
            off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)s_aSimpleTailLabels[i].pfnCallback);

            /* jump back to the return sequence / generate a return sequence. */
            if (!s_aSimpleTailLabels[i].fWithEpilog)
                off = iemNativeEmitJmpToFixed(pReNative, off, aoffLabels[kIemNativeLabelType_Return]);
            else
                off = iemNativeEmitCoreEpilog(pReNative, off);
        }


# ifdef VBOX_STRICT
        /* Make sure we've generated code for all labels. */
        for (uint32_t i = kIemNativeLabelType_Invalid + 1; i < RT_ELEMENTS(aoffLabels); i++)
            Assert(aoffLabels[i] != 0 || i == kIemNativeLabelType_Return);
# endif
    }
    IEMNATIVE_CATCH_LONGJMP_BEGIN(pReNative, rc);
    {
        Log(("iemNativeRecompileAttachExecMemChunkCtx: Caught %Rrc while recompiling!\n", rc));
        iemNativeTerm(pReNative);
        return NULL;
    }
    IEMNATIVE_CATCH_LONGJMP_END(pReNative);

    /*
     * Allocate memory for the context (first) and the common code (last).
     */
    PIEMNATIVEPERCHUNKCTX pCtx;
    uint32_t const        cbCtx  = RT_ALIGN_32(sizeof(*pCtx), 64);
    uint32_t const        cbCode = off * sizeof(IEMNATIVEINSTR);
    PIEMNATIVEINSTR       paFinalCommonCodeRx = NULL;
    pCtx = (PIEMNATIVEPERCHUNKCTX)iemExecMemAllocatorAllocFromChunk(pVCpu, idxChunk, cbCtx + cbCode, &paFinalCommonCodeRx);
    AssertLogRelMsgReturn(pCtx, ("cbCtx=%#x cbCode=%#x idxChunk=%#x\n", cbCtx, cbCode, idxChunk), NULL);

    /*
     * Copy over the generated code.
     * There should be no fixups or labels defined here.
     */
    paFinalCommonCodeRx = (PIEMNATIVEINSTR)((uintptr_t)paFinalCommonCodeRx + cbCtx);
    memcpy((PIEMNATIVEINSTR)((uintptr_t)pCtx + cbCtx), pReNative->pInstrBuf, cbCode);

    Assert(pReNative->cFixups == 0);
    Assert(pReNative->cLabels == 0);

    /*
     * Initialize the context.
     */
    AssertCompile(kIemNativeLabelType_Invalid == 0);
    AssertCompile(RT_ELEMENTS(pCtx->apExitLabels) == RT_ELEMENTS(aoffLabels));
    pCtx->apExitLabels[kIemNativeLabelType_Invalid] = 0;
    for (uint32_t i = kIemNativeLabelType_Invalid + 1; i < RT_ELEMENTS(pCtx->apExitLabels); i++)
    {
        Assert(aoffLabels[i] != 0 || i == kIemNativeLabelType_Return);
        pCtx->apExitLabels[i] = &paFinalCommonCodeRx[aoffLabels[i]];
        Log10(("  apExitLabels[%u]=%p %s\n", i, pCtx->apExitLabels[i], iemNativeGetLabelName((IEMNATIVELABELTYPE)i, true)));
    }

    iemExecMemAllocatorReadyForUse(pVCpu, pCtx, cbCtx + cbCode);

    iemNativeTerm(pReNative);
    return pCtx;
}
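
/*
 * Note: the allocation above carves a single block out of the executable
 * chunk and uses it for both the context structure and the common tail code,
 * i.e. the layout is (sizes illustrative):
 *
 * @code
 *  // +---------------------------+ <- pCtx (64-byte aligned context)
 *  // | IEMNATIVEPERCHUNKCTX      |    cbCtx  = RT_ALIGN_32(sizeof(*pCtx), 64)
 *  // +---------------------------+ <- paFinalCommonCodeRx = pCtx + cbCtx
 *  // | common epilog / tail code |    cbCode = off * sizeof(IEMNATIVEINSTR)
 *  // +---------------------------+
 * @endcode
 *
 * so each apExitLabels[] entry is simply &paFinalCommonCodeRx[aoffLabels[i]].
 */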

#endif /* IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE */

/**
 * Recompiles the given threaded TB into a native one.
 *
 * In case of failure the translation block will be returned as-is.
 *
 * @returns pTb.
 * @param   pVCpu   The cross context virtual CPU structure of the calling
 *                  thread.
 * @param   pTb     The threaded translation block to recompile to native.
 */
DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT
{
#if 0 /* For profiling the native recompiler code. */
l_profile_again:
#endif
    STAM_REL_PROFILE_START(&pVCpu->iem.s.StatNativeRecompilation, a);

    /*
     * The first time thru, we allocate the recompiler state and save it;
     * all the other times we'll just reuse the saved one after a quick reset.
     */
    PIEMRECOMPILERSTATE pReNative = pVCpu->iem.s.pNativeRecompilerStateR3;
    if (RT_LIKELY(pReNative))
        iemNativeReInit(pReNative, pTb);
    else
    {
        pReNative = iemNativeInit(pVCpu, pTb);
        AssertReturn(pReNative, pTb);
        pVCpu->iem.s.pNativeRecompilerStateR3 = pReNative; /* save it */
    }

#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
    /*
     * First do liveness analysis.  This is done backwards.
     */
    {
        uint32_t idxCall = pTb->Thrd.cCalls;
        if (idxCall <= pReNative->cLivenessEntriesAlloc)
        { /* likely */ }
        else
        {
            uint32_t cAlloc = RT_MAX(pReNative->cLivenessEntriesAlloc, _4K);
            while (idxCall > cAlloc)
                cAlloc *= 2;
            void *pvNew = RTMemRealloc(pReNative->paLivenessEntries, sizeof(pReNative->paLivenessEntries[0]) * cAlloc);
            AssertReturn(pvNew, pTb);
            pReNative->paLivenessEntries     = (PIEMLIVENESSENTRY)pvNew;
            pReNative->cLivenessEntriesAlloc = cAlloc;
        }
        AssertReturn(idxCall > 0, pTb);
        PIEMLIVENESSENTRY const paLivenessEntries = pReNative->paLivenessEntries;

        /* The initial (final) entry. */
        idxCall--;
        IEM_LIVENESS_RAW_INIT_AS_UNUSED(&paLivenessEntries[idxCall]);

        /* Loop backwards thru the calls and fill in the other entries. */
        PCIEMTHRDEDCALLENTRY pCallEntry = &pTb->Thrd.paCalls[idxCall];
        while (idxCall > 0)
        {
            PFNIEMNATIVELIVENESSFUNC const pfnLiveness = g_apfnIemNativeLivenessFunctions[pCallEntry->enmFunction];
            if (pfnLiveness)
                pfnLiveness(pCallEntry, &paLivenessEntries[idxCall], &paLivenessEntries[idxCall - 1]);
            else
                IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(&paLivenessEntries[idxCall - 1], &paLivenessEntries[idxCall]);
            pCallEntry--;
            idxCall--;
        }

# ifdef VBOX_WITH_STATISTICS
        /* Check if there are any EFLAGS optimizations to be had here.  This requires
           someone setting them to 'clobbered' rather than 'input'. */
        /** @todo */
# endif
    }
#endif
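
/*
 * Note: the backward pass above is standard liveness analysis: the entry for
 * call i describes the state seen by call i, derived from the entry of the
 * call that follows it.  A generic sketch of the same recurrence (State and
 * TransferOfCall are hypothetical names):
 *
 * @code
 *  State[cCalls - 1] = AllUnused();                  // nothing live after the last call
 *  for (uint32_t i = cCalls - 1; i > 0; i--)
 *      State[i - 1] = TransferOfCall(i)(State[i]);   // fold in what call i reads/writes
 * @endcode
 */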
10013 |
|
---|
10014 | /*
|
---|
10015 | * Recompiling and emitting code is done using try/throw/catch or setjmp/longjmp
|
---|
10016 | * for aborting if an error happens.
|
---|
10017 | */
|
---|
10018 | uint32_t cCallsLeft = pTb->Thrd.cCalls;
|
---|
10019 | #ifdef LOG_ENABLED
|
---|
10020 | uint32_t const cCallsOrg = cCallsLeft;
|
---|
10021 | #endif
|
---|
10022 | uint32_t off = 0;
|
---|
10023 | int rc = VINF_SUCCESS;
|
---|
10024 | IEMNATIVE_TRY_SETJMP(pReNative, rc)
|
---|
10025 | {
|
---|
10026 | #ifndef IEMNATIVE_WITH_RECOMPILER_PROLOGUE_SINGLETON
|
---|
10027 | /*
|
---|
10028 | * Emit prolog code (fixed).
|
---|
10029 | */
|
---|
10030 | off = iemNativeEmitProlog(pReNative, off);
|
---|
10031 | #endif
|
---|
10032 |
|
---|
10033 | /*
|
---|
10034 | * Convert the calls to native code.
|
---|
10035 | */
|
---|
10036 | #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
|
---|
10037 | int32_t iGstInstr = -1;
|
---|
10038 | #endif
|
---|
10039 | #ifndef VBOX_WITHOUT_RELEASE_STATISTICS
|
---|
10040 | uint32_t cThreadedCalls = 0;
|
---|
10041 | uint32_t cRecompiledCalls = 0;
|
---|
10042 | #endif
|
---|
10043 | #if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) || defined(IEM_WITH_INTRA_TB_JUMPS) || defined(VBOX_STRICT) || defined(LOG_ENABLED) || defined(VBOX_WITH_STATISTICS) || defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING)
|
---|
10044 | uint32_t idxCurCall = 0;
|
---|
10045 | #endif
|
---|
10046 | PCIEMTHRDEDCALLENTRY pCallEntry = pTb->Thrd.paCalls;
|
---|
10047 | pReNative->fExec = pTb->fFlags & IEMTB_F_IEM_F_MASK;
|
---|
10048 | while (cCallsLeft-- > 0)
|
---|
10049 | {
|
---|
10050 | PFNIEMNATIVERECOMPFUNC const pfnRecom = g_apfnIemNativeRecompileFunctions[pCallEntry->enmFunction];
|
---|
10051 | #if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) || defined(VBOX_WITH_STATISTICS) || defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING)
|
---|
10052 | pReNative->idxCurCall = idxCurCall;
|
---|
10053 | #endif
|
---|
10054 |
|
---|
10055 | #ifdef IEM_WITH_INTRA_TB_JUMPS
|
---|
10056 | /*
|
---|
10057 | * Define label for jump targets (currently only the first entry).
|
---|
10058 | */
|
---|
10059 | if (!(pCallEntry->fFlags & IEMTHREADEDCALLENTRY_F_JUMP_TARGET))
|
---|
10060 | { /* likely */ }
|
---|
10061 | else
|
---|
10062 | {
|
---|
10063 | iemNativeLabelCreate(pReNative, kIemNativeLabelType_LoopJumpTarget, off);
|
---|
10064 | Assert(idxCurCall == 0); /** @todo when jumping elsewhere, we have to save the register state. */
|
---|
10065 | }
|
---|
10066 | #endif
|
---|
10067 |
|
---|
10068 | /*
|
---|
10069 | * Debug info, assembly markup and statistics.
|
---|
10070 | */
|
---|
10071 | #if defined(IEMNATIVE_WITH_TB_DEBUG_INFO) || !defined(IEMNATIVE_WITH_BLTIN_CHECKMODE)
|
---|
10072 | if (pCallEntry->enmFunction == kIemThreadedFunc_BltIn_CheckMode)
|
---|
10073 | pReNative->fExec = pCallEntry->auParams[0] & IEMTB_F_IEM_F_MASK;
|
---|
10074 | #endif
|
---|
10075 | #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
|
---|
10076 | iemNativeDbgInfoAddNativeOffset(pReNative, off);
|
---|
10077 | if (iGstInstr < (int32_t)pCallEntry->idxInstr)
|
---|
10078 | {
|
---|
10079 | if (iGstInstr < (int32_t)pTb->cInstructions)
|
---|
10080 | iemNativeDbgInfoAddGuestInstruction(pReNative, pReNative->fExec);
|
---|
10081 | else
|
---|
10082 | Assert(iGstInstr == pTb->cInstructions);
|
---|
10083 | iGstInstr = pCallEntry->idxInstr;
|
---|
10084 | }
|
---|
10085 | iemNativeDbgInfoAddThreadedCall(pReNative, (IEMTHREADEDFUNCS)pCallEntry->enmFunction, pfnRecom != NULL);
|
---|
10086 | #endif
|
---|
10087 | #if defined(VBOX_STRICT)
|
---|
10088 | off = iemNativeEmitMarker(pReNative, off,
|
---|
10089 | RT_MAKE_U32(idxCurCall | (pfnRecom ? 0x8000 : 0), pCallEntry->enmFunction));
|
---|
10090 | #endif
|
---|
10091 | #if defined(VBOX_STRICT)
|
---|
10092 | iemNativeRegAssertSanity(pReNative);
|
---|
10093 | #endif
|
---|
10094 | #ifdef VBOX_WITH_STATISTICS
|
---|
10095 | off = iemNativeEmitThreadCallStats(pReNative, off, pCallEntry);
|
---|
10096 | #endif
|
---|
10097 |
|
---|
10098 | #if 0
|
---|
10099 | if ( pTb->GCPhysPc == 0x00000000000c1240
|
---|
10100 | && idxCurCall == 67)
|
---|
10101 | off = iemNativeEmitBrk(pReNative, off, 0xf000);
|
---|
10102 | #endif
|
---|
10103 |
|
---|
10104 | /*
|
---|
10105 | * Actual work.
|
---|
10106 | */
|
---|
10107 | Log2(("%u[%u]: %s%s (off=%#x)\n", idxCurCall, pCallEntry->idxInstr,
|
---|
10108 | g_apszIemThreadedFunctions[pCallEntry->enmFunction], pfnRecom ? "(recompiled)" : "(todo)", off));
|
---|
10109 | if (pfnRecom) /** @todo stats on this. */
|
---|
10110 | {
|
---|
10111 | off = pfnRecom(pReNative, off, pCallEntry);
|
---|
10112 | STAM_REL_STATS({cRecompiledCalls++;});
|
---|
10113 | }
|
---|
10114 | else
|
---|
10115 | {
|
---|
10116 | off = iemNativeEmitThreadedCall(pReNative, off, pCallEntry);
|
---|
10117 | STAM_REL_STATS({cThreadedCalls++;});
|
---|
10118 | }
|
---|
10119 | Assert(off <= pReNative->cInstrBufAlloc);
|
---|
10120 | Assert(pReNative->cCondDepth == 0);
|
---|
10121 |
|
---|
10122 | #if defined(LOG_ENABLED) && defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS)
|
---|
10123 | if (LogIs2Enabled())
|
---|
10124 | {
|
---|
10125 | PCIEMLIVENESSENTRY pLivenessEntry = &pReNative->paLivenessEntries[idxCurCall];
|
---|
# ifndef IEMLIVENESS_EXTENDED_LAYOUT
                static const char s_achState[] = "CUXI";
# else
                static const char s_achState[] = "UxRrWwMmCcQqKkNn";
# endif

                char szGpr[17];
                for (unsigned i = 0; i < 16; i++)
                    szGpr[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_GprFirst)];
                szGpr[16] = '\0';

                char szSegBase[X86_SREG_COUNT + 1];
                char szSegLimit[X86_SREG_COUNT + 1];
                char szSegAttrib[X86_SREG_COUNT + 1];
                char szSegSel[X86_SREG_COUNT + 1];
                for (unsigned i = 0; i < X86_SREG_COUNT; i++)
                {
                    szSegBase[i]   = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegBaseFirst)];
                    szSegAttrib[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegAttribFirst)];
                    szSegLimit[i]  = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegLimitFirst)];
                    szSegSel[i]    = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegSelFirst)];
                }
                szSegBase[X86_SREG_COUNT] = szSegAttrib[X86_SREG_COUNT] = szSegLimit[X86_SREG_COUNT]
                    = szSegSel[X86_SREG_COUNT] = '\0';

                char szEFlags[8];
                for (unsigned i = 0; i < 7; i++)
                    szEFlags[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_EFlags)];
                szEFlags[7] = '\0';

                Log2(("liveness: gpr=%s segbase=%s segattr=%s seglim=%s segsel=%s efl=%s\n",
                      szGpr, szSegBase, szSegAttrib, szSegLimit, szSegSel, szEFlags));
            }
#endif

            /*
             * Advance.
             */
            pCallEntry++;
#if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) || defined(IEM_WITH_INTRA_TB_JUMPS) || defined(VBOX_STRICT) || defined(LOG_ENABLED) || defined(VBOX_WITH_STATISTICS) || defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING)
            idxCurCall++;
#endif
        }

        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatNativeCallsRecompiled, cRecompiledCalls);
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatNativeCallsThreaded, cThreadedCalls);
        if (!cThreadedCalls)
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeFullyRecompiledTbs);

#ifdef VBOX_WITH_STATISTICS
        off = iemNativeEmitNativeTbExitStats(pReNative, off, RT_UOFFSETOF(VMCPUCC, iem.s.StatNativeTbFinished));
#endif

        /* Flush any pending writes before returning from the last instruction (RIP updates, etc.). */
        off = iemNativeRegFlushPendingWrites(pReNative, off);

        /*
         * Successful return, so clear the return register (eax, w0).
         */
        off = iemNativeEmitGprZero(pReNative, off, IEMNATIVE_CALL_RET_GREG);
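        /* Two tail-code strategies follow: without per-chunk tail code the TB gets
           its own epilog and helper-call labels appended below; with it, exits are
           routed to code emitted once per executable memory chunk. */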
#ifndef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
        /*
         * Emit the epilog code.
         */
        uint32_t idxReturnLabel;
        off = iemNativeEmitEpilog(pReNative, off, &idxReturnLabel);
#else
        /*
         * Jump to the common per-chunk epilog code.
         */
        //off = iemNativeEmitBrk(pReNative, off, 0x1227);
        off = iemNativeEmitTbExit(pReNative, off, kIemNativeLabelType_Return);
#endif

#ifndef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
        /*
         * Generate special jump labels.
         */
        off = iemNativeEmitRcFiddling(pReNative, off, idxReturnLabel);

        bool const fReturnBreakViaLookup = RT_BOOL(  pReNative->bmLabelTypes
                                                   & (  RT_BIT_64(kIemNativeLabelType_ReturnBreakViaLookup)
                                                      | RT_BIT_64(kIemNativeLabelType_ReturnBreakViaLookupWithIrq)
                                                      | RT_BIT_64(kIemNativeLabelType_ReturnBreakViaLookupWithTlb)
                                                      | RT_BIT_64(kIemNativeLabelType_ReturnBreakViaLookupWithTlbAndIrq)));
        if (fReturnBreakViaLookup)
        {
            uint32_t const idxReturnBreakLabel = iemNativeLabelCreate(pReNative, kIemNativeLabelType_ReturnBreak);
            off = iemNativeEmitReturnBreak(pReNative, off, idxReturnLabel);
            off = iemNativeEmitReturnBreakViaLookup(pReNative, off, idxReturnBreakLabel);
        }
        else if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_ReturnBreak))
            off = iemNativeEmitReturnBreak(pReNative, off, idxReturnLabel);

        if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_ReturnBreakFF))
            off = iemNativeEmitReturnBreakFF(pReNative, off, idxReturnLabel);

        if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_ReturnWithFlags))
            off = iemNativeEmitReturnWithFlags(pReNative, off, idxReturnLabel);

        /*
         * Generate simple TB tail labels that just call a helper with a pVCpu
         * argument and either return or longjmp/throw a non-zero status.
         *
         * The array entries must be ordered by enmLabel value so we can index
         * using fTailLabels bit numbers.
         */
        typedef IEM_DECL_NATIVE_HLP_PTR(int, PFNIEMNATIVESIMPLETAILLABELCALL,(PVMCPUCC pVCpu));
        static struct
        {
            IEMNATIVELABELTYPE              enmLabel;
            PFNIEMNATIVESIMPLETAILLABELCALL pfnCallback;
        } const g_aSimpleTailLabels[] =
        {
            { kIemNativeLabelType_Invalid,              NULL },
            { kIemNativeLabelType_RaiseDe,              iemNativeHlpExecRaiseDe },
            { kIemNativeLabelType_RaiseUd,              iemNativeHlpExecRaiseUd },
            { kIemNativeLabelType_RaiseSseRelated,      iemNativeHlpExecRaiseSseRelated },
            { kIemNativeLabelType_RaiseAvxRelated,      iemNativeHlpExecRaiseAvxRelated },
            { kIemNativeLabelType_RaiseSseAvxFpRelated, iemNativeHlpExecRaiseSseAvxFpRelated },
            { kIemNativeLabelType_RaiseNm,              iemNativeHlpExecRaiseNm },
            { kIemNativeLabelType_RaiseGp0,             iemNativeHlpExecRaiseGp0 },
            { kIemNativeLabelType_RaiseMf,              iemNativeHlpExecRaiseMf },
            { kIemNativeLabelType_RaiseXf,              iemNativeHlpExecRaiseXf },
            { kIemNativeLabelType_ObsoleteTb,           iemNativeHlpObsoleteTb },
            { kIemNativeLabelType_NeedCsLimChecking,    iemNativeHlpNeedCsLimChecking },
            { kIemNativeLabelType_CheckBranchMiss,      iemNativeHlpCheckBranchMiss },
        };

        AssertCompile(RT_ELEMENTS(g_aSimpleTailLabels) == (unsigned)kIemNativeLabelType_LastSimple + 1U);
        AssertCompile(kIemNativeLabelType_Invalid == 0);
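        /* Mask of the simple tail label types this TB actually uses: subtracting 2
           sets bits 1 thru kIemNativeLabelType_LastSimple while clearing bit 0,
           which is kIemNativeLabelType_Invalid (see the AssertCompile above). */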
        uint64_t fTailLabels = pReNative->bmLabelTypes & (RT_BIT_64(kIemNativeLabelType_LastSimple + 1U) - 2U);
        if (fTailLabels)
        {
            do
            {
                IEMNATIVELABELTYPE const enmLabel = (IEMNATIVELABELTYPE)(ASMBitFirstSetU64(fTailLabels) - 1U);
                fTailLabels &= ~RT_BIT_64(enmLabel);
                Assert(g_aSimpleTailLabels[enmLabel].enmLabel == enmLabel);

                uint32_t const idxLabel = iemNativeLabelFind(pReNative, enmLabel);
                Assert(idxLabel != UINT32_MAX);
                if (idxLabel != UINT32_MAX)
                {
                    iemNativeLabelDefine(pReNative, idxLabel, off);

                    /* int pfnCallback(PVMCPUCC pVCpu) */
                    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
                    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)g_aSimpleTailLabels[enmLabel].pfnCallback);

                    /* jump back to the return sequence. */
                    off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
                }
            } while (fTailLabels);
        }

#else  /* IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE */
        /*
         * Generate tail labels with jumps to the common per-chunk code.
         */
# ifndef RT_ARCH_AMD64
        Assert(!(pReNative->bmLabelTypes & (RT_BIT_64(kIemNativeLabelType_Return) | RT_BIT_64(kIemNativeLabelType_Invalid))));
        AssertCompile(kIemNativeLabelType_Invalid == 0);
        uint64_t fTailLabels = pReNative->bmLabelTypes & (RT_BIT_64(kIemNativeLabelType_LastTbExit + 1U) - 2U);
        if (fTailLabels)
        {
            do
            {
                IEMNATIVELABELTYPE const enmLabel = (IEMNATIVELABELTYPE)(ASMBitFirstSetU64(fTailLabels) - 1U);
                fTailLabels &= ~RT_BIT_64(enmLabel);

                uint32_t const idxLabel = iemNativeLabelFind(pReNative, enmLabel);
                AssertContinue(idxLabel != UINT32_MAX);
                iemNativeLabelDefine(pReNative, idxLabel, off);
                off = iemNativeEmitTbExit(pReNative, off, enmLabel);
            } while (fTailLabels);
        }
# else
        Assert(!(pReNative->bmLabelTypes & (RT_BIT_64(kIemNativeLabelType_LastTbExit + 1) - 1U))); /* Should not be used! */
# endif
#endif /* IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE */
    }
    IEMNATIVE_CATCH_LONGJMP_BEGIN(pReNative, rc);
    {
        Log(("iemNativeRecompile: Caught %Rrc while recompiling!\n", rc));
        return pTb;
    }
    IEMNATIVE_CATCH_LONGJMP_END(pReNative);
    Assert(off <= pReNative->cInstrBufAlloc);

    /*
     * Make sure all labels have been defined.
     */
    PIEMNATIVELABEL const paLabels = pReNative->paLabels;
#ifdef VBOX_STRICT
    uint32_t const cLabels = pReNative->cLabels;
    for (uint32_t i = 0; i < cLabels; i++)
        AssertMsgReturn(paLabels[i].off < off, ("i=%d enmType=%d\n", i, paLabels[i].enmType), pTb);
#endif

#if 0 /* For profiling the native recompiler code. */
    if (pTb->Thrd.cCalls >= 136)
    {
        STAM_REL_PROFILE_STOP(&pVCpu->iem.s.StatNativeRecompilation, a);
        goto l_profile_again;
    }
#endif

    /*
     * Allocate executable memory, copy over the code we've generated.
     */
    PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
    if (pTbAllocator->pDelayedFreeHead)
        iemTbAllocatorProcessDelayedFrees(pVCpu, pVCpu->iem.s.pTbAllocatorR3);
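    /* Note: the allocator returns a writable mapping (paFinalInstrBuf) to copy the
       code into as well as the executable mapping (paFinalInstrBufRx) the TB will
       run from; on hosts without strict W^X these may be the same address. */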
    PIEMNATIVEINSTR paFinalInstrBufRx = NULL;
#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
    PCIEMNATIVEPERCHUNKCTX pCtx = NULL;
    PIEMNATIVEINSTR const paFinalInstrBuf = iemExecMemAllocatorAlloc(pVCpu, off * sizeof(IEMNATIVEINSTR), pTb,
                                                                     &paFinalInstrBufRx, &pCtx);
#else
    PIEMNATIVEINSTR const paFinalInstrBuf = iemExecMemAllocatorAlloc(pVCpu, off * sizeof(IEMNATIVEINSTR), pTb,
                                                                     &paFinalInstrBufRx, NULL);
#endif
    AssertReturn(paFinalInstrBuf, pTb);
    memcpy(paFinalInstrBuf, pReNative->pInstrBuf, off * sizeof(paFinalInstrBuf[0]));

    /*
     * Apply fixups.
     */
    PIEMNATIVEFIXUP const paFixups = pReNative->paFixups;
    uint32_t const        cFixups  = pReNative->cFixups;
    for (uint32_t i = 0; i < cFixups; i++)
    {
        Assert(paFixups[i].off < off);
        Assert(paFixups[i].idxLabel < cLabels);
        AssertMsg(paLabels[paFixups[i].idxLabel].off < off,
                  ("idxLabel=%d enmType=%d off=%#x (max %#x)\n", paFixups[i].idxLabel,
                   paLabels[paFixups[i].idxLabel].enmType, paLabels[paFixups[i].idxLabel].off, off));
        RTPTRUNION const Ptr = { &paFinalInstrBuf[paFixups[i].off] };
        switch (paFixups[i].enmType)
        {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            case kIemNativeFixupType_Rel32:
                Assert(paFixups[i].off + 4 <= off);
                *Ptr.pi32 = paLabels[paFixups[i].idxLabel].off - paFixups[i].off + paFixups[i].offAddend;
                continue;

#elif defined(RT_ARCH_ARM64)
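            /* B/BL instructions: 26-bit signed displacement in bits [25:0], counted
               in 4 byte instructions (i.e. a +/-128 MB branch range). */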
            case kIemNativeFixupType_RelImm26At0:
            {
                Assert(paFixups[i].off < off);
                int32_t const offDisp = paLabels[paFixups[i].idxLabel].off - paFixups[i].off + paFixups[i].offAddend;
                Assert(offDisp >= -33554432 && offDisp < 33554432);
                *Ptr.pu32 = (*Ptr.pu32 & UINT32_C(0xfc000000)) | ((uint32_t)offDisp & UINT32_C(0x03ffffff));
                continue;
            }

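            /* Conditional branches (B.cond, CBZ/CBNZ): 19-bit signed displacement
               in bits [23:5], counted in instructions. */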
            case kIemNativeFixupType_RelImm19At5:
            {
                Assert(paFixups[i].off < off);
                int32_t const offDisp = paLabels[paFixups[i].idxLabel].off - paFixups[i].off + paFixups[i].offAddend;
                Assert(offDisp >= -262144 && offDisp < 262144);
                *Ptr.pu32 = (*Ptr.pu32 & UINT32_C(0xff00001f)) | (((uint32_t)offDisp & UINT32_C(0x0007ffff)) << 5);
                continue;
            }

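            /* Test-and-branch (TBZ/TBNZ): 14-bit signed displacement in bits [18:5],
               counted in instructions. */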
            case kIemNativeFixupType_RelImm14At5:
            {
                Assert(paFixups[i].off < off);
                int32_t const offDisp = paLabels[paFixups[i].idxLabel].off - paFixups[i].off + paFixups[i].offAddend;
                Assert(offDisp >= -8192 && offDisp < 8192);
                *Ptr.pu32 = (*Ptr.pu32 & UINT32_C(0xfff8001f)) | (((uint32_t)offDisp & UINT32_C(0x00003fff)) << 5);
                continue;
            }

#endif
            case kIemNativeFixupType_Invalid:
            case kIemNativeFixupType_End:
                break;
        }
        AssertFailed();
    }

#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
    /*
     * Apply TB exit fixups.
     */
    PIEMNATIVEEXITFIXUP const paTbExitFixups = pReNative->paTbExitFixups;
    uint32_t const            cTbExitFixups  = pReNative->cTbExitFixups;
    for (uint32_t i = 0; i < cTbExitFixups; i++)
    {
        Assert(paTbExitFixups[i].off < off);
        Assert(IEMNATIVELABELTYPE_IS_EXIT_REASON(paTbExitFixups[i].enmExitReason));
        RTPTRUNION const Ptr = { &paFinalInstrBuf[paTbExitFixups[i].off] };

# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
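        /* The rel32 displacement is relative to the end of the 4 byte field being
           patched, hence the '+ 4', and targets the shared exit label in the
           executable (RX) mapping of the chunk. */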
        Assert(paTbExitFixups[i].off + 4 <= off);
        intptr_t const offDisp = pCtx->apExitLabels[paTbExitFixups[i].enmExitReason] - &paFinalInstrBufRx[paTbExitFixups[i].off + 4];
        Assert(offDisp >= INT32_MIN && offDisp <= INT32_MAX);
        *Ptr.pi32 = (int32_t)offDisp;

# elif defined(RT_ARCH_ARM64)
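        /* Patch the 26-bit displacement (counted in instructions) into the
           placeholder branch instruction, keeping the opcode bits in [31:26]. */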
        intptr_t const offDisp = pCtx->apExitLabels[paTbExitFixups[i].enmExitReason] - &paFinalInstrBufRx[paTbExitFixups[i].off];
        Assert(offDisp >= -33554432 && offDisp < 33554432);
        *Ptr.pu32 = (*Ptr.pu32 & UINT32_C(0xfc000000)) | ((uint32_t)offDisp & UINT32_C(0x03ffffff));

# else
#  error "Port me!"
# endif
    }
#endif

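    /* Hand the copied code over for execution; this is where the allocator performs
       any instruction cache maintenance and permission switching the host requires. */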
    iemExecMemAllocatorReadyForUse(pVCpu, paFinalInstrBufRx, off * sizeof(IEMNATIVEINSTR));
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatTbNativeCode, off * sizeof(IEMNATIVEINSTR));

    /*
     * Convert the translation block.
     */
    RTMemFree(pTb->Thrd.paCalls);
    pTb->Native.paInstructions = paFinalInstrBufRx;
    pTb->Native.cInstructions  = off;
    pTb->fFlags                = (pTb->fFlags & ~IEMTB_F_TYPE_MASK) | IEMTB_F_TYPE_NATIVE;
#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    pReNative->pDbgInfo->FlatPc = pTb->FlatPc;
    pTb->pDbgInfo = (PIEMTBDBG)RTMemDup(pReNative->pDbgInfo, /* non-fatal, so no return check. */
                                        RT_UOFFSETOF_DYN(IEMTBDBG, aEntries[pReNative->pDbgInfo->cEntries]));
#endif

    Assert(pTbAllocator->cThreadedTbs > 0);
    pTbAllocator->cThreadedTbs -= 1;
    pTbAllocator->cNativeTbs   += 1;
    Assert(pTbAllocator->cNativeTbs <= pTbAllocator->cTotalTbs);

#ifdef LOG_ENABLED
    /*
     * Disassemble to the log if enabled.
     */
    if (LogIs3Enabled())
    {
        Log3(("----------------------------------------- %d calls ---------------------------------------\n", cCallsOrg));
        iemNativeDisassembleTb(pVCpu, pTb, DBGFR3InfoLogHlp());
# if defined(DEBUG_bird) || defined(DEBUG_aeichner)
        RTLogFlush(NULL);
# endif
    }
#endif
    /*iemNativeDisassembleTb(pTb, DBGFR3InfoLogRelHlp());*/

    STAM_REL_PROFILE_STOP(&pVCpu->iem.s.StatNativeRecompilation, a);
    return pTb;
}
