VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp@ 103912

Last change on this file since 103912 was 103912, checked in by vboxsync, 9 months ago

VMM/IEM: Fix iemNativeEmitGuestSimdRegValueCheck() for 256-bit values, was comparing the upper half to the wrong register, bugref:10614

1/* $Id: IEMAllN8veRecompiler.cpp 103912 2024-03-19 11:19:18Z vboxsync $ */
2/** @file
3 * IEM - Native Recompiler
4 *
5 * Logging group IEM_RE_NATIVE assignments:
6 * - Level 1 (Log) : ...
7 * - Flow (LogFlow) : ...
8 * - Level 2 (Log2) : Details calls as they're recompiled.
9 * - Level 3 (Log3) : Disassemble native code after recompiling.
10 * - Level 4 (Log4) : ...
11 * - Level 5 (Log5) : ...
12 * - Level 6 (Log6) : ...
13 * - Level 7 (Log7) : ...
14 * - Level 8 (Log8) : ...
15 * - Level 9 (Log9) : ...
16 * - Level 10 (Log10): ...
17 * - Level 11 (Log11): Variable allocator.
18 * - Level 12 (Log12): Register allocator.
19 */
20
21/*
22 * Copyright (C) 2023 Oracle and/or its affiliates.
23 *
24 * This file is part of VirtualBox base platform packages, as
25 * available from https://www.virtualbox.org.
26 *
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation, in version 3 of the
30 * License.
31 *
32 * This program is distributed in the hope that it will be useful, but
33 * WITHOUT ANY WARRANTY; without even the implied warranty of
34 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
35 * General Public License for more details.
36 *
37 * You should have received a copy of the GNU General Public License
38 * along with this program; if not, see <https://www.gnu.org/licenses>.
39 *
40 * SPDX-License-Identifier: GPL-3.0-only
41 */
42
43
44/*********************************************************************************************************************************
45* Header Files *
46*********************************************************************************************************************************/
47#define LOG_GROUP LOG_GROUP_IEM_RE_NATIVE
48#define IEM_WITH_OPAQUE_DECODER_STATE
49#define VMCPU_INCL_CPUM_GST_CTX
50#define VMM_INCLUDED_SRC_include_IEMMc_h /* block IEMMc.h inclusion. */
51#include <VBox/vmm/iem.h>
52#include <VBox/vmm/cpum.h>
53#include <VBox/vmm/dbgf.h>
54#include "IEMInternal.h"
55#include <VBox/vmm/vmcc.h>
56#include <VBox/log.h>
57#include <VBox/err.h>
58#include <VBox/dis.h>
59#include <VBox/param.h>
60#include <iprt/assert.h>
61#include <iprt/heap.h>
62#include <iprt/mem.h>
63#include <iprt/string.h>
64#if defined(RT_ARCH_AMD64)
65# include <iprt/x86.h>
66#elif defined(RT_ARCH_ARM64)
67# include <iprt/armv8.h>
68#endif
69
70#ifdef RT_OS_WINDOWS
71# include <iprt/formats/pecoff.h> /* this is incompatible with windows.h, thus: */
72extern "C" DECLIMPORT(uint8_t) __cdecl RtlAddFunctionTable(void *pvFunctionTable, uint32_t cEntries, uintptr_t uBaseAddress);
73extern "C" DECLIMPORT(uint8_t) __cdecl RtlDelFunctionTable(void *pvFunctionTable);
74#else
75# include <iprt/formats/dwarf.h>
76# if defined(RT_OS_DARWIN)
77# include <libkern/OSCacheControl.h>
78# define IEMNATIVE_USE_LIBUNWIND
79extern "C" void __register_frame(const void *pvFde);
80extern "C" void __deregister_frame(const void *pvFde);
81# else
82# ifdef DEBUG_bird /** @todo not thread safe yet */
83# define IEMNATIVE_USE_GDB_JIT
84# endif
85# ifdef IEMNATIVE_USE_GDB_JIT
86# include <iprt/critsect.h>
87# include <iprt/once.h>
88# include <iprt/formats/elf64.h>
89# endif
90extern "C" void __register_frame_info(void *pvBegin, void *pvObj); /* found no header for these two */
91extern "C" void *__deregister_frame_info(void *pvBegin); /* (returns pvObj from __register_frame_info call) */
92# endif
93#endif
94#ifdef VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER
95# include "/opt/local/include/capstone/capstone.h"
96#endif
97
98#include "IEMInline.h"
99#include "IEMThreadedFunctions.h"
100#include "IEMN8veRecompiler.h"
101#include "IEMN8veRecompilerEmit.h"
102#include "IEMN8veRecompilerTlbLookup.h"
103#include "IEMNativeFunctions.h"
104
105
106/*
107 * Narrow down configs here to avoid wasting time on unused configs.
108 * Note! Same checks in IEMAllThrdRecompiler.cpp.
109 */
110
111#ifndef IEM_WITH_CODE_TLB
112# error The code TLB must be enabled for the recompiler.
113#endif
114
115#ifndef IEM_WITH_DATA_TLB
116# error The data TLB must be enabled for the recompiler.
117#endif
118
119#ifndef IEM_WITH_SETJMP
120# error The setjmp approach must be enabled for the recompiler.
121#endif
122
123/** @todo eliminate this clang build hack. */
124#if RT_CLANG_PREREQ(4, 0)
125# pragma GCC diagnostic ignored "-Wunused-function"
126#endif
127
128
129/*********************************************************************************************************************************
130* Internal Functions *
131*********************************************************************************************************************************/
132#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
133static void iemNativeDbgInfoAddLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, uint16_t uData);
134#endif
135DECL_FORCE_INLINE(void) iemNativeRegClearGstRegShadowing(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, uint32_t off);
136DECL_FORCE_INLINE(void) iemNativeRegClearGstRegShadowingOne(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg,
137 IEMNATIVEGSTREG enmGstReg, uint32_t off);
138DECL_INLINE_THROW(void) iemNativeVarRegisterRelease(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar);
139
140
141/*********************************************************************************************************************************
142* Executable Memory Allocator *
143*********************************************************************************************************************************/
144/** @def IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
145 * Use an alternative chunk sub-allocator that does not store internal data
146 * in the chunk.
147 *
148 * Using RTHeapSimple is not practical on newer darwin systems where
149 * RTMEM_PROT_WRITE and RTMEM_PROT_EXEC are mutually exclusive in process
150 * memory. We would have to change the protection of the whole chunk for
151 * every call to RTHeapSimple, which would be rather expensive.
152 *
153 * This alternative implementation lets us restrict page protection modifications
154 * to the pages backing the executable memory we just allocated.
155 */
156#define IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
157/** The chunk sub-allocation unit size in bytes. */
158#define IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SIZE 128
159/** The chunk sub-allocation unit size as a shift factor. */
160#define IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT 7
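/*
 * For example: with the 128 byte unit size above, a request is first rounded
 * up to whole units before the allocation bitmap is consulted.  A minimal
 * worked sketch of that rounding (the local names are illustrative only):
 *
 *      uint32_t cbReq     = 300;                                           // bytes requested
 *      uint32_t cReqUnits = (cbReq + IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SIZE - 1)
 *                         >> IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT;          // (300 + 127) / 128 = 3 units
 *      uint32_t cbRounded = cReqUnits << IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT; // 3 * 128 = 384 bytes
 */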
161
162#if defined(IN_RING3) && !defined(RT_OS_WINDOWS)
163# ifdef IEMNATIVE_USE_GDB_JIT
164# define IEMNATIVE_USE_GDB_JIT_ET_DYN
165
166/** GDB JIT: Code entry. */
167typedef struct GDBJITCODEENTRY
168{
169 struct GDBJITCODEENTRY *pNext;
170 struct GDBJITCODEENTRY *pPrev;
171 uint8_t *pbSymFile;
172 uint64_t cbSymFile;
173} GDBJITCODEENTRY;
174
175/** GDB JIT: Actions. */
176typedef enum GDBJITACTIONS : uint32_t
177{
178 kGdbJitaction_NoAction = 0, kGdbJitaction_Register, kGdbJitaction_Unregister
179} GDBJITACTIONS;
180
181/** GDB JIT: Descriptor. */
182typedef struct GDBJITDESCRIPTOR
183{
184 uint32_t uVersion;
185 GDBJITACTIONS enmAction;
186 GDBJITCODEENTRY *pRelevant;
187 GDBJITCODEENTRY *pHead;
188 /** Our addition: */
189 GDBJITCODEENTRY *pTail;
190} GDBJITDESCRIPTOR;
191
192/** GDB JIT: Our simple symbol file data. */
193typedef struct GDBJITSYMFILE
194{
195 Elf64_Ehdr EHdr;
196# ifndef IEMNATIVE_USE_GDB_JIT_ET_DYN
197 Elf64_Shdr aShdrs[5];
198# else
199 Elf64_Shdr aShdrs[7];
200 Elf64_Phdr aPhdrs[2];
201# endif
202 /** The dwarf ehframe data for the chunk. */
203 uint8_t abEhFrame[512];
204 char szzStrTab[128];
205 Elf64_Sym aSymbols[3];
206# ifdef IEMNATIVE_USE_GDB_JIT_ET_DYN
207 Elf64_Sym aDynSyms[2];
208 Elf64_Dyn aDyn[6];
209# endif
210} GDBJITSYMFILE;
211
212extern "C" GDBJITDESCRIPTOR __jit_debug_descriptor;
213extern "C" DECLEXPORT(void) __jit_debug_register_code(void);
214
215/** Init once for g_IemNativeGdbJitLock. */
216static RTONCE g_IemNativeGdbJitOnce = RTONCE_INITIALIZER;
217/** Init once for the critical section. */
218static RTCRITSECT g_IemNativeGdbJitLock;
219
220/** GDB reads the info here. */
221GDBJITDESCRIPTOR __jit_debug_descriptor = { 1, kGdbJitaction_NoAction, NULL, NULL };
222
223/** GDB sets a breakpoint on this and checks __jit_debug_descriptor when hit. */
224DECL_NO_INLINE(RT_NOTHING, DECLEXPORT(void)) __jit_debug_register_code(void)
225{
226 ASMNopPause();
227}
228
229/** @callback_method_impl{FNRTONCE} */
230static DECLCALLBACK(int32_t) iemNativeGdbJitInitOnce(void *pvUser)
231{
232 RT_NOREF(pvUser);
233 return RTCritSectInit(&g_IemNativeGdbJitLock);
234}
235
236
237# endif /* IEMNATIVE_USE_GDB_JIT */
238
239/**
240 * Per-chunk unwind info for non-windows hosts.
241 */
242typedef struct IEMEXECMEMCHUNKEHFRAME
243{
244# ifdef IEMNATIVE_USE_LIBUNWIND
245 /** The offset of the FDE into abEhFrame. */
246 uintptr_t offFda;
247# else
248 /** 'struct object' storage area. */
249 uint8_t abObject[1024];
250# endif
251# ifdef IEMNATIVE_USE_GDB_JIT
252# if 0
253 /** The GDB JIT 'symbol file' data. */
254 GDBJITSYMFILE GdbJitSymFile;
255# endif
256 /** The GDB JIT list entry. */
257 GDBJITCODEENTRY GdbJitEntry;
258# endif
259 /** The dwarf ehframe data for the chunk. */
260 uint8_t abEhFrame[512];
261} IEMEXECMEMCHUNKEHFRAME;
262/** Pointer to per-chunk unwind info for non-windows hosts. */
263typedef IEMEXECMEMCHUNKEHFRAME *PIEMEXECMEMCHUNKEHFRAME;
264#endif
265
266
267/**
268 * A chunk of executable memory.
269 */
270typedef struct IEMEXECMEMCHUNK
271{
272#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
273 /** Number of free items in this chunk. */
274 uint32_t cFreeUnits;
275 /** Hint where to start searching for free space in the allocation bitmap. */
276 uint32_t idxFreeHint;
277#else
278 /** The heap handle. */
279 RTHEAPSIMPLE hHeap;
280#endif
281 /** Pointer to the chunk. */
282 void *pvChunk;
283#ifdef IN_RING3
284 /**
285 * Pointer to the unwind information.
286 *
287 * This is used during C++ throw and longjmp (windows and probably most other
288 * platforms). Some debuggers (windbg) make use of it as well.
289 *
290 * Windows: This is allocated from hHeap on windows because (at least for
291 * AMD64) the UNWIND_INFO structure address in the
292 * RUNTIME_FUNCTION entry is an RVA and the chunk is the "image".
293 *
294 * Others: Allocated from the regular heap to avoid unnecessary executable data
295 * structures. This points to an IEMEXECMEMCHUNKEHFRAME structure. */
296 void *pvUnwindInfo;
297#elif defined(IN_RING0)
298 /** Allocation handle. */
299 RTR0MEMOBJ hMemObj;
300#endif
301} IEMEXECMEMCHUNK;
302/** Pointer to a memory chunk. */
303typedef IEMEXECMEMCHUNK *PIEMEXECMEMCHUNK;
304
305
306/**
307 * Executable memory allocator for the native recompiler.
308 */
309typedef struct IEMEXECMEMALLOCATOR
310{
311 /** Magic value (IEMEXECMEMALLOCATOR_MAGIC). */
312 uint32_t uMagic;
313
314 /** The chunk size. */
315 uint32_t cbChunk;
316 /** The maximum number of chunks. */
317 uint32_t cMaxChunks;
318 /** The current number of chunks. */
319 uint32_t cChunks;
320 /** Hint where to start looking for available memory. */
321 uint32_t idxChunkHint;
322 /** Statistics: Current number of allocations. */
323 uint32_t cAllocations;
324
325 /** The total amount of memory available. */
326 uint64_t cbTotal;
327 /** Total amount of free memory. */
328 uint64_t cbFree;
329 /** Total amount of memory allocated. */
330 uint64_t cbAllocated;
331
332#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
333 /** Pointer to the allocation bitmaps for all the chunks (follows aChunks).
334 *
335 * Since the chunk size is a power of two and the minimum chunk size is a lot
336 * higher than the IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SIZE, each chunk will always
337 * require a whole number of uint64_t elements in the allocation bitmap. So,
338 * for sake of simplicity, they are allocated as one continous chunk for
339 * simplicity/laziness. */
340 uint64_t *pbmAlloc;
341 /** Number of units (IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SIZE) per chunk. */
342 uint32_t cUnitsPerChunk;
343 /** Number of bitmap elements per chunk (for quickly locating the bitmap
344 * portion corresponding to a chunk). */
345 uint32_t cBitmapElementsPerChunk;
346#else
347 /** @name Tweaks to get 64 byte aligned allocations w/o unnecessary fragmentation.
348 * @{ */
349 /** The size of the heap internal block header. This is used to adjust the
350 * requested memory size to make sure there is exactly enough room for a header at
351 * the end of the blocks we allocate before the next 64 byte alignment line. */
352 uint32_t cbHeapBlockHdr;
353 /** The size of the initial heap allocation required to make sure the first
354 * allocation is correctly aligned. */
355 uint32_t cbHeapAlignTweak;
356 /** The alignment tweak allocation address. */
357 void *pvAlignTweak;
358 /** @} */
359#endif
360
361#if defined(IN_RING3) && !defined(RT_OS_WINDOWS)
362 /** Pointer to the array of unwind info running parallel to aChunks (same
363 * allocation as this structure, located after the bitmaps).
364 * (For Windows, the structures must reside in 32-bit RVA distance to the
365 * actual chunk, so they are allocated off the chunk.) */
366 PIEMEXECMEMCHUNKEHFRAME paEhFrames;
367#endif
368
369 /** The allocation chunks. */
370 RT_FLEXIBLE_ARRAY_EXTENSION
371 IEMEXECMEMCHUNK aChunks[RT_FLEXIBLE_ARRAY];
372} IEMEXECMEMALLOCATOR;
373/** Pointer to an executable memory allocator. */
374typedef IEMEXECMEMALLOCATOR *PIEMEXECMEMALLOCATOR;
375
376/** Magic value for IEMEXECMEMALLOCATOR::uMagic (Scott Frederick Turow). */
377#define IEMEXECMEMALLOCATOR_MAGIC UINT32_C(0x19490412)
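/*
 * For example: with a 64 MiB chunk and the 128 byte allocation unit, the
 * bitmap fields above work out to cUnitsPerChunk = 64 MiB / 128 = 524288
 * units and cBitmapElementsPerChunk = 524288 / 64 = 8192 uint64_t words,
 * i.e. 64 KiB of bitmap per chunk, all of it kept outside the executable
 * chunk itself.
 */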
378
379
380static int iemExecMemAllocatorGrow(PVMCPUCC pVCpu, PIEMEXECMEMALLOCATOR pExecMemAllocator);
381
382
383/**
384 * Worker for iemExecMemAllocatorAlloc that returns @a pvRet after updating
385 * the heap statistics.
386 */
387static void * iemExecMemAllocatorAllocTailCode(PIEMEXECMEMALLOCATOR pExecMemAllocator, void *pvRet,
388 uint32_t cbReq, uint32_t idxChunk)
389{
390 pExecMemAllocator->cAllocations += 1;
391 pExecMemAllocator->cbAllocated += cbReq;
392#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
393 pExecMemAllocator->cbFree -= cbReq;
394#else
395 pExecMemAllocator->cbFree -= RT_ALIGN_32(cbReq, 64);
396#endif
397 pExecMemAllocator->idxChunkHint = idxChunk;
398
399#ifdef RT_OS_DARWIN
400 /*
401 * Sucks, but RTMEM_PROT_EXEC and RTMEM_PROT_WRITE are mutually exclusive
402 * on darwin. So, we mark the pages returned as read+write after alloc and
403 * expect the caller to call iemExecMemAllocatorReadyForUse when done
404 * writing to the allocation.
405 *
406 * See also https://developer.apple.com/documentation/apple-silicon/porting-just-in-time-compilers-to-apple-silicon
407 * for details.
408 */
409 /** @todo detect if this is necessary... it wasn't required on 10.15 or
410 * whatever older version it was. */
411 int rc = RTMemProtect(pvRet, cbReq, RTMEM_PROT_WRITE | RTMEM_PROT_READ);
412 AssertRC(rc);
413#endif
414
415 return pvRet;
416}
417
418
419#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
420static void *iemExecMemAllocatorAllocInChunkInt(PIEMEXECMEMALLOCATOR pExecMemAllocator, uint64_t *pbmAlloc, uint32_t idxFirst,
421 uint32_t cToScan, uint32_t cReqUnits, uint32_t idxChunk)
422{
423 /*
424 * Shift the bitmap to the idxFirst bit so we can use ASMBitFirstClear.
425 */
426 Assert(!(cToScan & 63));
427 Assert(!(idxFirst & 63));
428 Assert(cToScan + idxFirst <= pExecMemAllocator->cUnitsPerChunk);
429 pbmAlloc += idxFirst / 64;
430
431 /*
432 * Scan the bitmap for cReqUnits consecutive clear bits
433 */
434 /** @todo This can probably be done more efficiently for non-x86 systems. */
435 int iBit = ASMBitFirstClear(pbmAlloc, cToScan);
436 while (iBit >= 0 && (uint32_t)iBit <= cToScan - cReqUnits)
437 {
438 uint32_t idxAddBit = 1;
439 while (idxAddBit < cReqUnits && !ASMBitTest(pbmAlloc, (uint32_t)iBit + idxAddBit))
440 idxAddBit++;
441 if (idxAddBit >= cReqUnits)
442 {
443 ASMBitSetRange(pbmAlloc, (uint32_t)iBit, (uint32_t)iBit + cReqUnits);
444
445 PIEMEXECMEMCHUNK const pChunk = &pExecMemAllocator->aChunks[idxChunk];
446 pChunk->cFreeUnits -= cReqUnits;
447 pChunk->idxFreeHint = (uint32_t)iBit + cReqUnits;
448
449 void * const pvRet = (uint8_t *)pChunk->pvChunk
450 + ((idxFirst + (uint32_t)iBit) << IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT);
451
452 return iemExecMemAllocatorAllocTailCode(pExecMemAllocator, pvRet,
453 cReqUnits << IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT, idxChunk);
454 }
455
456 iBit = ASMBitNextClear(pbmAlloc, cToScan, iBit + idxAddBit - 1);
457 }
458 return NULL;
459}
460#endif /* IEMEXECMEM_USE_ALT_SUB_ALLOCATOR */
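/*
 * A minimal standalone sketch of the first-fit scan above, using plain
 * bit-at-a-time loops instead of the ASMBitFirstClear/ASMBitTest/ASMBitSetRange
 * helpers.  The function name and the simplified scan are illustrative
 * assumptions only, not part of the allocator.
 */
#if 0 /* illustrative sketch */
static int iemExecMemSketchFirstFit(uint64_t *pbmAlloc, uint32_t cUnits, uint32_t cReqUnits)
{
    uint32_t cRun = 0;
    for (uint32_t i = 0; i < cUnits; i++)
    {
        if (pbmAlloc[i / 64] & RT_BIT_64(i % 64))
            cRun = 0;                                   /* unit is taken, restart the run */
        else if (++cRun == cReqUnits)
        {
            uint32_t const idxFirst = i - cReqUnits + 1;
            for (uint32_t j = idxFirst; j <= i; j++)    /* mark the whole run as allocated */
                pbmAlloc[j / 64] |= RT_BIT_64(j % 64);
            return (int)idxFirst;                       /* unit index of the allocation */
        }
    }
    return -1;                                          /* no free run of cReqUnits units */
}
#endif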
461
462
463static void *iemExecMemAllocatorAllocInChunk(PIEMEXECMEMALLOCATOR pExecMemAllocator, uint32_t idxChunk, uint32_t cbReq)
464{
465#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
466 /*
467 * Figure out how much to allocate.
468 */
469 uint32_t const cReqUnits = (cbReq + IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SIZE - 1) >> IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT;
470 if (cReqUnits <= pExecMemAllocator->aChunks[idxChunk].cFreeUnits)
471 {
472 uint64_t * const pbmAlloc = &pExecMemAllocator->pbmAlloc[pExecMemAllocator->cBitmapElementsPerChunk * idxChunk];
473 uint32_t const idxHint = pExecMemAllocator->aChunks[idxChunk].idxFreeHint & ~(uint32_t)63;
474 if (idxHint + cReqUnits <= pExecMemAllocator->cUnitsPerChunk)
475 {
476 void *pvRet = iemExecMemAllocatorAllocInChunkInt(pExecMemAllocator, pbmAlloc, idxHint,
477 pExecMemAllocator->cUnitsPerChunk - idxHint, cReqUnits, idxChunk);
478 if (pvRet)
479 return pvRet;
480 }
481 return iemExecMemAllocatorAllocInChunkInt(pExecMemAllocator, pbmAlloc, 0,
482 RT_MIN(pExecMemAllocator->cUnitsPerChunk, RT_ALIGN_32(idxHint + cReqUnits, 64)),
483 cReqUnits, idxChunk);
484 }
485#else
486 void *pvRet = RTHeapSimpleAlloc(pExecMemAllocator->aChunks[idxChunk].hHeap, cbReq, 32);
487 if (pvRet)
488 return iemExecMemAllocatorAllocTailCode(pExecMemAllocator, pvRet, cbReq, idxChunk);
489#endif
490 return NULL;
491
492}
493
494
495/**
496 * Allocates @a cbReq bytes of executable memory.
497 *
498 * @returns Pointer to the memory, NULL if out of memory or other problem
499 * encountered.
500 * @param pVCpu The cross context virtual CPU structure of the calling
501 * thread.
502 * @param cbReq How many bytes are required.
503 */
504static void *iemExecMemAllocatorAlloc(PVMCPU pVCpu, uint32_t cbReq)
505{
506 PIEMEXECMEMALLOCATOR pExecMemAllocator = pVCpu->iem.s.pExecMemAllocatorR3;
507 AssertReturn(pExecMemAllocator && pExecMemAllocator->uMagic == IEMEXECMEMALLOCATOR_MAGIC, NULL);
508 AssertMsgReturn(cbReq > 32 && cbReq < _512K, ("%#x\n", cbReq), NULL);
509
510
511 for (unsigned iIteration = 0;; iIteration++)
512 {
513 /*
514 * Adjust the request size so it'll fit the allocator alignment/whatnot.
515 *
516 * For the RTHeapSimple allocator this means to follow the logic described
517 * in iemExecMemAllocatorGrow and attempt to allocate it from one of the
518 * existing chunks if we think we've got sufficient free memory around.
519 *
520 * While for the alternative one we just align it up to a whole unit size.
521 */
522#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
523 cbReq = RT_ALIGN_32(cbReq, IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SIZE);
524#else
525 cbReq = RT_ALIGN_32(cbReq + pExecMemAllocator->cbHeapBlockHdr, 64) - pExecMemAllocator->cbHeapBlockHdr;
526#endif
527 if (cbReq <= pExecMemAllocator->cbFree)
528 {
529 uint32_t const cChunks = pExecMemAllocator->cChunks;
530 uint32_t const idxChunkHint = pExecMemAllocator->idxChunkHint < cChunks ? pExecMemAllocator->idxChunkHint : 0;
531 for (uint32_t idxChunk = idxChunkHint; idxChunk < cChunks; idxChunk++)
532 {
533 void *pvRet = iemExecMemAllocatorAllocInChunk(pExecMemAllocator, idxChunk, cbReq);
534 if (pvRet)
535 return pvRet;
536 }
537 for (uint32_t idxChunk = 0; idxChunk < idxChunkHint; idxChunk++)
538 {
539 void *pvRet = iemExecMemAllocatorAllocInChunk(pExecMemAllocator, idxChunk, cbReq);
540 if (pvRet)
541 return pvRet;
542 }
543 }
544
545 /*
546 * Can we grow it with another chunk?
547 */
548 if (pExecMemAllocator->cChunks < pExecMemAllocator->cMaxChunks)
549 {
550 int rc = iemExecMemAllocatorGrow(pVCpu, pExecMemAllocator);
551 AssertLogRelRCReturn(rc, NULL);
552
553 uint32_t const idxChunk = pExecMemAllocator->cChunks - 1;
554 void *pvRet = iemExecMemAllocatorAllocInChunk(pExecMemAllocator, idxChunk, cbReq);
555 if (pvRet)
556 return pvRet;
557 AssertFailed();
558 }
559
560 /*
561 * Try prune native TBs once.
562 */
563 if (iIteration == 0)
564 iemTbAllocatorFreeupNativeSpace(pVCpu, cbReq / sizeof(IEMNATIVEINSTR));
565 else
566 {
567 /** @todo stats... */
568 return NULL;
569 }
570 }
571
572}
573
574
575/** This is a hook that we may need later for changing memory protection back
576 * to readonly+exec */
577static void iemExecMemAllocatorReadyForUse(PVMCPUCC pVCpu, void *pv, size_t cb)
578{
579#ifdef RT_OS_DARWIN
580 /* See iemExecMemAllocatorAllocTailCode for the explanation. */
581 int rc = RTMemProtect(pv, cb, RTMEM_PROT_EXEC | RTMEM_PROT_READ);
582 AssertRC(rc); RT_NOREF(pVCpu);
583
584 /*
585 * Flush the instruction cache:
586 * https://developer.apple.com/documentation/apple-silicon/porting-just-in-time-compilers-to-apple-silicon
587 */
588 /* sys_dcache_flush(pv, cb); - not necessary */
589 sys_icache_invalidate(pv, cb);
590#else
591 RT_NOREF(pVCpu, pv, cb);
592#endif
593}
594
595
596/**
597 * Frees executable memory.
598 */
599void iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb)
600{
601 PIEMEXECMEMALLOCATOR pExecMemAllocator = pVCpu->iem.s.pExecMemAllocatorR3;
602 Assert(pExecMemAllocator && pExecMemAllocator->uMagic == IEMEXECMEMALLOCATOR_MAGIC);
603 Assert(pv);
604#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
605 Assert(!((uintptr_t)pv & (IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SIZE - 1)));
606#else
607 Assert(!((uintptr_t)pv & 63));
608#endif
609
610 /* Align the size as we did when allocating the block. */
611#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
612 cb = RT_ALIGN_Z(cb, IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SIZE);
613#else
614 cb = RT_ALIGN_Z(cb + pExecMemAllocator->cbHeapBlockHdr, 64) - pExecMemAllocator->cbHeapBlockHdr;
615#endif
616
617 /* Free it / assert sanity. */
618#if defined(VBOX_STRICT) || defined(IEMEXECMEM_USE_ALT_SUB_ALLOCATOR)
619 uint32_t const cChunks = pExecMemAllocator->cChunks;
620 uint32_t const cbChunk = pExecMemAllocator->cbChunk;
621 bool fFound = false;
622 for (uint32_t idxChunk = 0; idxChunk < cChunks; idxChunk++)
623 {
624 uintptr_t const offChunk = (uintptr_t)pv - (uintptr_t)pExecMemAllocator->aChunks[idxChunk].pvChunk;
625 fFound = offChunk < cbChunk;
626 if (fFound)
627 {
628#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
629 uint32_t const idxFirst = (uint32_t)offChunk >> IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT;
630 uint32_t const cReqUnits = (uint32_t)cb >> IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT;
631
632 /* Check that it's valid and free it. */
633 uint64_t * const pbmAlloc = &pExecMemAllocator->pbmAlloc[pExecMemAllocator->cBitmapElementsPerChunk * idxChunk];
634 AssertReturnVoid(ASMBitTest(pbmAlloc, idxFirst));
635 for (uint32_t i = 1; i < cReqUnits; i++)
636 AssertReturnVoid(ASMBitTest(pbmAlloc, idxFirst + i));
637 ASMBitClearRange(pbmAlloc, idxFirst, idxFirst + cReqUnits);
638
639 pExecMemAllocator->aChunks[idxChunk].cFreeUnits += cReqUnits;
640 pExecMemAllocator->aChunks[idxChunk].idxFreeHint = idxFirst;
641
642 /* Update the stats. */
643 pExecMemAllocator->cbAllocated -= cb;
644 pExecMemAllocator->cbFree += cb;
645 pExecMemAllocator->cAllocations -= 1;
646 return;
647#else
648 Assert(RTHeapSimpleSize(pExecMemAllocator->aChunks[idxChunk].hHeap, pv) == cb);
649 break;
650#endif
651 }
652 }
653# ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
654 AssertFailed();
655# else
656 Assert(fFound);
657# endif
658#endif
659
660#ifndef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
661 /* Update stats while cb is freshly calculated. */
662 pExecMemAllocator->cbAllocated -= cb;
663 pExecMemAllocator->cbFree += RT_ALIGN_Z(cb, 64);
664 pExecMemAllocator->cAllocations -= 1;
665
666 /* Free it. */
667 RTHeapSimpleFree(NIL_RTHEAPSIMPLE, pv);
668#endif
669}
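/*
 * A sketch of the expected life cycle of an executable allocation, assuming
 * the three functions above: the block starts out writable, is filled with
 * native code, is handed to iemExecMemAllocatorReadyForUse (which on darwin
 * flips it to read+exec and flushes the icache), and is eventually returned
 * via iemExecMemAllocatorFree.  The size and the fill pattern below are
 * invented for illustration.
 */
#if 0 /* illustrative sketch */
static void iemExecMemSketchLifeCycle(PVMCPUCC pVCpu)
{
    uint32_t const cbCode = 256;                                    /* made-up request size */
    uint8_t *pbCode = (uint8_t *)iemExecMemAllocatorAlloc(pVCpu, cbCode);
    if (pbCode)
    {
        memset(pbCode, 0xcc, cbCode);                               /* stand-in for emitted native code */
        iemExecMemAllocatorReadyForUse(pVCpu, pbCode, cbCode);      /* now read+exec only where required */
        /* ... execute the translation block ... */
        iemExecMemAllocatorFree(pVCpu, pbCode, cbCode);
    }
}
#endif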
670
671
672
673#ifdef IN_RING3
674# ifdef RT_OS_WINDOWS
675
676/**
677 * Initializes the unwind info structures for windows hosts.
678 */
679static int
680iemExecMemAllocatorInitAndRegisterUnwindInfoForChunk(PVMCPUCC pVCpu, PIEMEXECMEMALLOCATOR pExecMemAllocator,
681 void *pvChunk, uint32_t idxChunk)
682{
683 RT_NOREF(pVCpu);
684
685 /*
686 * The AMD64 unwind opcodes.
687 *
688 * This is a program that starts with RSP after a RET instruction that
689 * ends up in recompiled code, and the operations we describe here will
690 * restore all non-volatile registers and bring RSP back to where our
691 * RET address is. This means it's reverse order from what happens in
692 * the prologue.
693 *
694 * Note! Using a frame register approach here, partly because we have one
695 * but mainly because the UWOP_ALLOC_LARGE argument values
696 * would be a pain to write initializers for. On the positive
697 * side, we're impervious to changes in the stack variable
698 * area and can deal with dynamic stack allocations if necessary.
699 */
700 static const IMAGE_UNWIND_CODE s_aOpcodes[] =
701 {
702 { { 16, IMAGE_AMD64_UWOP_SET_FPREG, 0 } }, /* RSP = RBP - FrameOffset * 10 (0x60) */
703 { { 16, IMAGE_AMD64_UWOP_ALLOC_SMALL, 0 } }, /* RSP += 8; */
704 { { 14, IMAGE_AMD64_UWOP_PUSH_NONVOL, X86_GREG_x15 } }, /* R15 = [RSP]; RSP += 8; */
705 { { 12, IMAGE_AMD64_UWOP_PUSH_NONVOL, X86_GREG_x14 } }, /* R14 = [RSP]; RSP += 8; */
706 { { 10, IMAGE_AMD64_UWOP_PUSH_NONVOL, X86_GREG_x13 } }, /* R13 = [RSP]; RSP += 8; */
707 { { 8, IMAGE_AMD64_UWOP_PUSH_NONVOL, X86_GREG_x12 } }, /* R12 = [RSP]; RSP += 8; */
708 { { 7, IMAGE_AMD64_UWOP_PUSH_NONVOL, X86_GREG_xDI } }, /* RDI = [RSP]; RSP += 8; */
709 { { 6, IMAGE_AMD64_UWOP_PUSH_NONVOL, X86_GREG_xSI } }, /* RSI = [RSP]; RSP += 8; */
710 { { 5, IMAGE_AMD64_UWOP_PUSH_NONVOL, X86_GREG_xBX } }, /* RBX = [RSP]; RSP += 8; */
711 { { 4, IMAGE_AMD64_UWOP_PUSH_NONVOL, X86_GREG_xBP } }, /* RBP = [RSP]; RSP += 8; */
712 };
713 union
714 {
715 IMAGE_UNWIND_INFO Info;
716 uint8_t abPadding[RT_UOFFSETOF(IMAGE_UNWIND_INFO, aOpcodes) + 16];
717 } s_UnwindInfo =
718 {
719 {
720 /* .Version = */ 1,
721 /* .Flags = */ 0,
722 /* .SizeOfProlog = */ 16, /* whatever */
723 /* .CountOfCodes = */ RT_ELEMENTS(s_aOpcodes),
724 /* .FrameRegister = */ X86_GREG_xBP,
725 /* .FrameOffset = */ (-IEMNATIVE_FP_OFF_LAST_PUSH + 8) / 16 /* we're off by one slot. sigh. */,
726 }
727 };
728 AssertCompile(-IEMNATIVE_FP_OFF_LAST_PUSH < 240 && -IEMNATIVE_FP_OFF_LAST_PUSH > 0);
729 AssertCompile((-IEMNATIVE_FP_OFF_LAST_PUSH & 0xf) == 8);
730
731 /*
732 * Calc how much space we need and allocate it off the exec heap.
733 */
734 unsigned const cFunctionEntries = 1;
735 unsigned const cbUnwindInfo = sizeof(s_aOpcodes) + RT_UOFFSETOF(IMAGE_UNWIND_INFO, aOpcodes);
736 unsigned const cbNeeded = sizeof(IMAGE_RUNTIME_FUNCTION_ENTRY) * cFunctionEntries + cbUnwindInfo;
737# ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
738 unsigned const cbNeededAligned = RT_ALIGN_32(cbNeeded, IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SIZE);
739 PIMAGE_RUNTIME_FUNCTION_ENTRY const paFunctions
740 = (PIMAGE_RUNTIME_FUNCTION_ENTRY)iemExecMemAllocatorAllocInChunk(pExecMemAllocator, idxChunk, cbNeededAligned);
741# else
742 unsigned const cbNeededAligned = RT_ALIGN_32(cbNeeded + pExecMemAllocator->cbHeapBlockHdr, 64)
743 - pExecMemAllocator->cbHeapBlockHdr;
744 PIMAGE_RUNTIME_FUNCTION_ENTRY const paFunctions = (PIMAGE_RUNTIME_FUNCTION_ENTRY)RTHeapSimpleAlloc(hHeap, cbNeededAligned,
745 32 /*cbAlignment*/);
746# endif
747 AssertReturn(paFunctions, VERR_INTERNAL_ERROR_5);
748 pExecMemAllocator->aChunks[idxChunk].pvUnwindInfo = paFunctions;
749
750 /*
751 * Initialize the structures.
752 */
753 PIMAGE_UNWIND_INFO const pInfo = (PIMAGE_UNWIND_INFO)&paFunctions[cFunctionEntries];
754
755 paFunctions[0].BeginAddress = 0;
756 paFunctions[0].EndAddress = pExecMemAllocator->cbChunk;
757 paFunctions[0].UnwindInfoAddress = (uint32_t)((uintptr_t)pInfo - (uintptr_t)pvChunk);
758
759 memcpy(pInfo, &s_UnwindInfo, RT_UOFFSETOF(IMAGE_UNWIND_INFO, aOpcodes));
760 memcpy(&pInfo->aOpcodes[0], s_aOpcodes, sizeof(s_aOpcodes));
761
762 /*
763 * Register it.
764 */
765 uint8_t fRet = RtlAddFunctionTable(paFunctions, cFunctionEntries, (uintptr_t)pvChunk);
766 AssertReturn(fRet, VERR_INTERNAL_ERROR_3); /* Nothing to clean up on failure, since it's within the chunk itself. */
767
768 return VINF_SUCCESS;
769}
770
771
772# else /* !RT_OS_WINDOWS */
773
774/**
775 * Emits a LEB128 encoded value between -0x2000 and 0x2000 (both exclusive).
776 */
777DECLINLINE(RTPTRUNION) iemDwarfPutLeb128(RTPTRUNION Ptr, int32_t iValue)
778{
779 if (iValue >= 64)
780 {
781 Assert(iValue < 0x2000);
782 *Ptr.pb++ = ((uint8_t)iValue & 0x7f) | 0x80;
783 *Ptr.pb++ = (uint8_t)(iValue >> 7) & 0x3f;
784 }
785 else if (iValue >= 0)
786 *Ptr.pb++ = (uint8_t)iValue;
787 else if (iValue > -64)
788 *Ptr.pb++ = ((uint8_t)iValue & 0x3f) | 0x40;
789 else
790 {
791 Assert(iValue > -0x2000);
792 *Ptr.pb++ = ((uint8_t)iValue & 0x7f) | 0x80;
793 *Ptr.pb++ = ((uint8_t)(iValue >> 7) & 0x3f) | 0x40;
794 }
795 return Ptr;
796}
797
798
799/**
800 * Emits an ULEB128 encoded value (up to 64-bit wide).
801 */
802DECLINLINE(RTPTRUNION) iemDwarfPutUleb128(RTPTRUNION Ptr, uint64_t uValue)
803{
804 while (uValue >= 0x80)
805 {
806 *Ptr.pb++ = ((uint8_t)uValue & 0x7f) | 0x80;
807 uValue >>= 7;
808 }
809 *Ptr.pb++ = (uint8_t)uValue;
810 return Ptr;
811}
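/*
 * For example, the two helpers above produce the following byte sequences:
 *
 *      iemDwarfPutUleb128(Ptr, 8)    ->  0x08
 *      iemDwarfPutUleb128(Ptr, 300)  ->  0xac 0x02      (0x2c | 0x80, then 300 >> 7 = 2)
 *      iemDwarfPutLeb128(Ptr, -8)    ->  0x78           (single byte, sign bit 0x40 set)
 *      iemDwarfPutLeb128(Ptr, 129)   ->  0x81 0x01      (low 7 bits | 0x80, then 129 >> 7 = 1)
 */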
812
813
814/**
815 * Emits a CFA rule as register @a uReg + offset @a off.
816 */
817DECLINLINE(RTPTRUNION) iemDwarfPutCfaDefCfa(RTPTRUNION Ptr, uint32_t uReg, uint32_t off)
818{
819 *Ptr.pb++ = DW_CFA_def_cfa;
820 Ptr = iemDwarfPutUleb128(Ptr, uReg);
821 Ptr = iemDwarfPutUleb128(Ptr, off);
822 return Ptr;
823}
824
825
826/**
827 * Emits a register (@a uReg) save location:
828 * CFA + @a off * data_alignment_factor
829 */
830DECLINLINE(RTPTRUNION) iemDwarfPutCfaOffset(RTPTRUNION Ptr, uint32_t uReg, uint32_t off)
831{
832 if (uReg < 0x40)
833 *Ptr.pb++ = DW_CFA_offset | uReg;
834 else
835 {
836 *Ptr.pb++ = DW_CFA_offset_extended;
837 Ptr = iemDwarfPutUleb128(Ptr, uReg);
838 }
839 Ptr = iemDwarfPutUleb128(Ptr, off);
840 return Ptr;
841}
842
843
844# if 0 /* unused */
845/**
846 * Emits a register (@a uReg) save location, using signed offset:
847 * CFA + @a offSigned * data_alignment_factor
848 */
849DECLINLINE(RTPTRUNION) iemDwarfPutCfaSignedOffset(RTPTRUNION Ptr, uint32_t uReg, int32_t offSigned)
850{
851 *Ptr.pb++ = DW_CFA_offset_extended_sf;
852 Ptr = iemDwarfPutUleb128(Ptr, uReg);
853 Ptr = iemDwarfPutLeb128(Ptr, offSigned);
854 return Ptr;
855}
856# endif
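/*
 * Worked example: the CIE generated below emits a data alignment factor of -8,
 * so a rule like iemDwarfPutCfaOffset(Ptr, uReg, 2) means "uReg was saved at
 * CFA + 2 * -8", i.e. 16 bytes below the canonical frame address.  The AMD64
 * sequence further down thus records the prologue push order: return RIP at
 * CFA-8, saved RBP at CFA-16, RBX at CFA-24, and so on.
 */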
857
858
859/**
860 * Initializes the unwind info section for non-windows hosts.
861 */
862static int
863iemExecMemAllocatorInitAndRegisterUnwindInfoForChunk(PVMCPUCC pVCpu, PIEMEXECMEMALLOCATOR pExecMemAllocator,
864 void *pvChunk, uint32_t idxChunk)
865{
866 PIEMEXECMEMCHUNKEHFRAME const pEhFrame = &pExecMemAllocator->paEhFrames[idxChunk];
867 pExecMemAllocator->aChunks[idxChunk].pvUnwindInfo = pEhFrame; /* not necessary, but whatever */
868
869 RTPTRUNION Ptr = { pEhFrame->abEhFrame };
870
871 /*
872 * Generate the CIE first.
873 */
874# ifdef IEMNATIVE_USE_LIBUNWIND /* libunwind (llvm, darwin) only supports v1 and v3. */
875 uint8_t const iDwarfVer = 3;
876# else
877 uint8_t const iDwarfVer = 4;
878# endif
879 RTPTRUNION const PtrCie = Ptr;
880 *Ptr.pu32++ = 123; /* The CIE length will be determined later. */
881 *Ptr.pu32++ = 0 /*UINT32_MAX*/; /* I'm a CIE in .eh_frame speak. */
882 *Ptr.pb++ = iDwarfVer; /* DWARF version */
883 *Ptr.pb++ = 0; /* Augmentation. */
884 if (iDwarfVer >= 4)
885 {
886 *Ptr.pb++ = sizeof(uintptr_t); /* Address size. */
887 *Ptr.pb++ = 0; /* Segment selector size. */
888 }
889# ifdef RT_ARCH_AMD64
890 Ptr = iemDwarfPutLeb128(Ptr, 1); /* Code alignment factor (LEB128 = 1). */
891# else
892 Ptr = iemDwarfPutLeb128(Ptr, 4); /* Code alignment factor (LEB128 = 4). */
893# endif
894 Ptr = iemDwarfPutLeb128(Ptr, -8); /* Data alignment factor (LEB128 = -8). */
895# ifdef RT_ARCH_AMD64
896 Ptr = iemDwarfPutUleb128(Ptr, DWREG_AMD64_RA); /* Return address column (ULEB128) */
897# elif defined(RT_ARCH_ARM64)
898 Ptr = iemDwarfPutUleb128(Ptr, DWREG_ARM64_LR); /* Return address column (ULEB128) */
899# else
900# error "port me"
901# endif
902 /* Initial instructions: */
903# ifdef RT_ARCH_AMD64
904 Ptr = iemDwarfPutCfaDefCfa(Ptr, DWREG_AMD64_RBP, 16); /* CFA = RBP + 0x10 - first stack parameter */
905 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_AMD64_RA, 1); /* Ret RIP = [CFA + 1*-8] */
906 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_AMD64_RBP, 2); /* RBP = [CFA + 2*-8] */
907 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_AMD64_RBX, 3); /* RBX = [CFA + 3*-8] */
908 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_AMD64_R12, 4); /* R12 = [CFA + 4*-8] */
909 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_AMD64_R13, 5); /* R13 = [CFA + 5*-8] */
910 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_AMD64_R14, 6); /* R14 = [CFA + 6*-8] */
911 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_AMD64_R15, 7); /* R15 = [CFA + 7*-8] */
912# elif defined(RT_ARCH_ARM64)
913# if 1
914 Ptr = iemDwarfPutCfaDefCfa(Ptr, DWREG_ARM64_BP, 16); /* CFA = BP + 0x10 - first stack parameter */
915# else
916 Ptr = iemDwarfPutCfaDefCfa(Ptr, DWREG_ARM64_SP, IEMNATIVE_FRAME_VAR_SIZE + IEMNATIVE_FRAME_SAVE_REG_SIZE);
917# endif
918 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_LR, 1); /* Ret PC = [CFA + 1*-8] */
919 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_BP, 2); /* Ret BP = [CFA + 2*-8] */
920 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_X28, 3); /* X28 = [CFA + 3*-8] */
921 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_X27, 4); /* X27 = [CFA + 4*-8] */
922 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_X26, 5); /* X26 = [CFA + 5*-8] */
923 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_X25, 6); /* X25 = [CFA + 6*-8] */
924 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_X24, 7); /* X24 = [CFA + 7*-8] */
925 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_X23, 8); /* X23 = [CFA + 8*-8] */
926 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_X22, 9); /* X22 = [CFA + 9*-8] */
927 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_X21, 10); /* X21 = [CFA +10*-8] */
928 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_X20, 11); /* X20 = [CFA +11*-8] */
929 Ptr = iemDwarfPutCfaOffset(Ptr, DWREG_ARM64_X19, 12); /* X19 = [CFA +12*-8] */
930 AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);
931 /** @todo do we need to do something about clearing DWREG_ARM64_RA_SIGN_STATE or something? */
932# else
933# error "port me"
934# endif
935 while ((Ptr.u - PtrCie.u) & 3)
936 *Ptr.pb++ = DW_CFA_nop;
937 /* Finalize the CIE size. */
938 *PtrCie.pu32 = Ptr.u - PtrCie.u - sizeof(uint32_t);
939
940 /*
941 * Generate an FDE for the whole chunk area.
942 */
943# ifdef IEMNATIVE_USE_LIBUNWIND
944 pEhFrame->offFda = Ptr.u - (uintptr_t)&pEhFrame->abEhFrame[0];
945# endif
946 RTPTRUNION const PtrFde = Ptr;
947 *Ptr.pu32++ = 123; /* The FDE length will be determined later. */
948 *Ptr.pu32 = Ptr.u - PtrCie.u; /* Negated self relative CIE address. */
949 Ptr.pu32++;
950 *Ptr.pu64++ = (uintptr_t)pvChunk; /* Absolute start PC of this FDE. */
951 *Ptr.pu64++ = pExecMemAllocator->cbChunk; /* PC range length for this FDE. */
952# if 0 /* not required for recent libunwind.dylib nor recent libgcc/glibc. */
953 *Ptr.pb++ = DW_CFA_nop;
954# endif
955 while ((Ptr.u - PtrFde.u) & 3)
956 *Ptr.pb++ = DW_CFA_nop;
957 /* Finalize the FDE size. */
958 *PtrFde.pu32 = Ptr.u - PtrFde.u - sizeof(uint32_t);
959
960 /* Terminator entry. */
961 *Ptr.pu32++ = 0;
962 *Ptr.pu32++ = 0; /* just to be sure... */
963 Assert(Ptr.u - (uintptr_t)&pEhFrame->abEhFrame[0] <= sizeof(pEhFrame->abEhFrame));
964
965 /*
966 * Register it.
967 */
968# ifdef IEMNATIVE_USE_LIBUNWIND
969 __register_frame(&pEhFrame->abEhFrame[pEhFrame->offFda]);
970# else
971 memset(pEhFrame->abObject, 0xf6, sizeof(pEhFrame->abObject)); /* color the memory to better spot usage */
972 __register_frame_info(pEhFrame->abEhFrame, pEhFrame->abObject);
973# endif
974
975# ifdef IEMNATIVE_USE_GDB_JIT
976 /*
977 * Now for telling GDB about this (experimental).
978 *
979 * This seems to work best with ET_DYN.
980 */
981 unsigned const cbNeeded = sizeof(GDBJITSYMFILE);
982# ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
983 unsigned const cbNeededAligned = RT_ALIGN_32(cbNeeded, IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SIZE);
984 GDBJITSYMFILE * const pSymFile = (GDBJITSYMFILE *)iemExecMemAllocatorAllocInChunk(pExecMemAllocator, idxChunk, cbNeededAligned);
985# else
986 unsigned const cbNeededAligned = RT_ALIGN_32(cbNeeded + pExecMemAllocator->cbHeapBlockHdr, 64)
987 - pExecMemAllocator->cbHeapBlockHdr;
988 GDBJITSYMFILE * const pSymFile = (GDBJITSYMFILE *)RTHeapSimpleAlloc(hHeap, cbNeededAligned, 32 /*cbAlignment*/);
989# endif
990 AssertReturn(pSymFile, VERR_INTERNAL_ERROR_5);
991 unsigned const offSymFileInChunk = (uintptr_t)pSymFile - (uintptr_t)pvChunk;
992
993 RT_ZERO(*pSymFile);
994
995 /*
996 * The ELF header:
997 */
998 pSymFile->EHdr.e_ident[0] = ELFMAG0;
999 pSymFile->EHdr.e_ident[1] = ELFMAG1;
1000 pSymFile->EHdr.e_ident[2] = ELFMAG2;
1001 pSymFile->EHdr.e_ident[3] = ELFMAG3;
1002 pSymFile->EHdr.e_ident[EI_VERSION] = EV_CURRENT;
1003 pSymFile->EHdr.e_ident[EI_CLASS] = ELFCLASS64;
1004 pSymFile->EHdr.e_ident[EI_DATA] = ELFDATA2LSB;
1005 pSymFile->EHdr.e_ident[EI_OSABI] = ELFOSABI_NONE;
1006# ifdef IEMNATIVE_USE_GDB_JIT_ET_DYN
1007 pSymFile->EHdr.e_type = ET_DYN;
1008# else
1009 pSymFile->EHdr.e_type = ET_REL;
1010# endif
1011# ifdef RT_ARCH_AMD64
1012 pSymFile->EHdr.e_machine = EM_AMD64;
1013# elif defined(RT_ARCH_ARM64)
1014 pSymFile->EHdr.e_machine = EM_AARCH64;
1015# else
1016# error "port me"
1017# endif
1018 pSymFile->EHdr.e_version = 1; /*?*/
1019 pSymFile->EHdr.e_entry = 0;
1020# if defined(IEMNATIVE_USE_GDB_JIT_ET_DYN)
1021 pSymFile->EHdr.e_phoff = RT_UOFFSETOF(GDBJITSYMFILE, aPhdrs);
1022# else
1023 pSymFile->EHdr.e_phoff = 0;
1024# endif
1025 pSymFile->EHdr.e_shoff = sizeof(pSymFile->EHdr);
1026 pSymFile->EHdr.e_flags = 0;
1027 pSymFile->EHdr.e_ehsize = sizeof(pSymFile->EHdr);
1028# if defined(IEMNATIVE_USE_GDB_JIT_ET_DYN)
1029 pSymFile->EHdr.e_phentsize = sizeof(pSymFile->aPhdrs[0]);
1030 pSymFile->EHdr.e_phnum = RT_ELEMENTS(pSymFile->aPhdrs);
1031# else
1032 pSymFile->EHdr.e_phentsize = 0;
1033 pSymFile->EHdr.e_phnum = 0;
1034# endif
1035 pSymFile->EHdr.e_shentsize = sizeof(pSymFile->aShdrs[0]);
1036 pSymFile->EHdr.e_shnum = RT_ELEMENTS(pSymFile->aShdrs);
1037 pSymFile->EHdr.e_shstrndx = 0; /* set later */
1038
1039 uint32_t offStrTab = 0;
1040#define APPEND_STR(a_szStr) do { \
1041 memcpy(&pSymFile->szzStrTab[offStrTab], a_szStr, sizeof(a_szStr)); \
1042 offStrTab += sizeof(a_szStr); \
1043 Assert(offStrTab < sizeof(pSymFile->szzStrTab)); \
1044 } while (0)
1045#define APPEND_STR_FMT(a_szStr, ...) do { \
1046 offStrTab += RTStrPrintf(&pSymFile->szzStrTab[offStrTab], sizeof(pSymFile->szzStrTab) - offStrTab, a_szStr, __VA_ARGS__); \
1047 offStrTab++; \
1048 Assert(offStrTab < sizeof(pSymFile->szzStrTab)); \
1049 } while (0)
1050
1051 /*
1052 * Section headers.
1053 */
1054 /* Section header #0: NULL */
1055 unsigned i = 0;
1056 APPEND_STR("");
1057 RT_ZERO(pSymFile->aShdrs[i]);
1058 i++;
1059
1060 /* Section header: .eh_frame */
1061 pSymFile->aShdrs[i].sh_name = offStrTab;
1062 APPEND_STR(".eh_frame");
1063 pSymFile->aShdrs[i].sh_type = SHT_PROGBITS;
1064 pSymFile->aShdrs[i].sh_flags = SHF_ALLOC | SHF_EXECINSTR;
1065# if defined(IEMNATIVE_USE_GDB_JIT_ET_DYN) || defined(IEMNATIVE_USE_GDB_JIT_ELF_RVAS)
1066 pSymFile->aShdrs[i].sh_offset
1067 = pSymFile->aShdrs[i].sh_addr = RT_UOFFSETOF(GDBJITSYMFILE, abEhFrame);
1068# else
1069 pSymFile->aShdrs[i].sh_addr = (uintptr_t)&pSymFile->abEhFrame[0];
1070 pSymFile->aShdrs[i].sh_offset = 0;
1071# endif
1072
1073 pSymFile->aShdrs[i].sh_size = sizeof(pEhFrame->abEhFrame);
1074 pSymFile->aShdrs[i].sh_link = 0;
1075 pSymFile->aShdrs[i].sh_info = 0;
1076 pSymFile->aShdrs[i].sh_addralign = 1;
1077 pSymFile->aShdrs[i].sh_entsize = 0;
1078 memcpy(pSymFile->abEhFrame, pEhFrame->abEhFrame, sizeof(pEhFrame->abEhFrame));
1079 i++;
1080
1081 /* Section header: .shstrtab */
1082 unsigned const iShStrTab = i;
1083 pSymFile->EHdr.e_shstrndx = iShStrTab;
1084 pSymFile->aShdrs[i].sh_name = offStrTab;
1085 APPEND_STR(".shstrtab");
1086 pSymFile->aShdrs[i].sh_type = SHT_STRTAB;
1087 pSymFile->aShdrs[i].sh_flags = SHF_ALLOC;
1088# if defined(IEMNATIVE_USE_GDB_JIT_ET_DYN) || defined(IEMNATIVE_USE_GDB_JIT_ELF_RVAS)
1089 pSymFile->aShdrs[i].sh_offset
1090 = pSymFile->aShdrs[i].sh_addr = RT_UOFFSETOF(GDBJITSYMFILE, szzStrTab);
1091# else
1092 pSymFile->aShdrs[i].sh_addr = (uintptr_t)&pSymFile->szzStrTab[0];
1093 pSymFile->aShdrs[i].sh_offset = 0;
1094# endif
1095 pSymFile->aShdrs[i].sh_size = sizeof(pSymFile->szzStrTab);
1096 pSymFile->aShdrs[i].sh_link = 0;
1097 pSymFile->aShdrs[i].sh_info = 0;
1098 pSymFile->aShdrs[i].sh_addralign = 1;
1099 pSymFile->aShdrs[i].sh_entsize = 0;
1100 i++;
1101
1102 /* Section header: .symtab */
1103 pSymFile->aShdrs[i].sh_name = offStrTab;
1104 APPEND_STR(".symtab");
1105 pSymFile->aShdrs[i].sh_type = SHT_SYMTAB;
1106 pSymFile->aShdrs[i].sh_flags = SHF_ALLOC;
1107 pSymFile->aShdrs[i].sh_offset
1108 = pSymFile->aShdrs[i].sh_addr = RT_UOFFSETOF(GDBJITSYMFILE, aSymbols);
1109 pSymFile->aShdrs[i].sh_size = sizeof(pSymFile->aSymbols);
1110 pSymFile->aShdrs[i].sh_link = iShStrTab;
1111 pSymFile->aShdrs[i].sh_info = RT_ELEMENTS(pSymFile->aSymbols);
1112 pSymFile->aShdrs[i].sh_addralign = sizeof(pSymFile->aSymbols[0].st_value);
1113 pSymFile->aShdrs[i].sh_entsize = sizeof(pSymFile->aSymbols[0]);
1114 i++;
1115
1116# if defined(IEMNATIVE_USE_GDB_JIT_ET_DYN)
1117 /* Section header: .dynsym */
1118 pSymFile->aShdrs[i].sh_name = offStrTab;
1119 APPEND_STR(".dynsym");
1120 pSymFile->aShdrs[i].sh_type = SHT_DYNSYM;
1121 pSymFile->aShdrs[i].sh_flags = SHF_ALLOC;
1122 pSymFile->aShdrs[i].sh_offset
1123 = pSymFile->aShdrs[i].sh_addr = RT_UOFFSETOF(GDBJITSYMFILE, aDynSyms);
1124 pSymFile->aShdrs[i].sh_size = sizeof(pSymFile->aDynSyms);
1125 pSymFile->aShdrs[i].sh_link = iShStrTab;
1126 pSymFile->aShdrs[i].sh_info = RT_ELEMENTS(pSymFile->aDynSyms);
1127 pSymFile->aShdrs[i].sh_addralign = sizeof(pSymFile->aDynSyms[0].st_value);
1128 pSymFile->aShdrs[i].sh_entsize = sizeof(pSymFile->aDynSyms[0]);
1129 i++;
1130# endif
1131
1132# if defined(IEMNATIVE_USE_GDB_JIT_ET_DYN)
1133 /* Section header: .dynamic */
1134 pSymFile->aShdrs[i].sh_name = offStrTab;
1135 APPEND_STR(".dynamic");
1136 pSymFile->aShdrs[i].sh_type = SHT_DYNAMIC;
1137 pSymFile->aShdrs[i].sh_flags = SHF_ALLOC;
1138 pSymFile->aShdrs[i].sh_offset
1139 = pSymFile->aShdrs[i].sh_addr = RT_UOFFSETOF(GDBJITSYMFILE, aDyn);
1140 pSymFile->aShdrs[i].sh_size = sizeof(pSymFile->aDyn);
1141 pSymFile->aShdrs[i].sh_link = iShStrTab;
1142 pSymFile->aShdrs[i].sh_info = 0;
1143 pSymFile->aShdrs[i].sh_addralign = 1;
1144 pSymFile->aShdrs[i].sh_entsize = sizeof(pSymFile->aDyn[0]);
1145 i++;
1146# endif
1147
1148 /* Section header: .text */
1149 unsigned const iShText = i;
1150 pSymFile->aShdrs[i].sh_name = offStrTab;
1151 APPEND_STR(".text");
1152 pSymFile->aShdrs[i].sh_type = SHT_PROGBITS;
1153 pSymFile->aShdrs[i].sh_flags = SHF_ALLOC | SHF_EXECINSTR;
1154# if defined(IEMNATIVE_USE_GDB_JIT_ET_DYN) || defined(IEMNATIVE_USE_GDB_JIT_ELF_RVAS)
1155 pSymFile->aShdrs[i].sh_offset
1156 = pSymFile->aShdrs[i].sh_addr = sizeof(GDBJITSYMFILE);
1157# else
1158 pSymFile->aShdrs[i].sh_addr = (uintptr_t)(pSymFile + 1);
1159 pSymFile->aShdrs[i].sh_offset = 0;
1160# endif
1161 pSymFile->aShdrs[i].sh_size = pExecMemAllocator->cbChunk - offSymFileInChunk - sizeof(GDBJITSYMFILE);
1162 pSymFile->aShdrs[i].sh_link = 0;
1163 pSymFile->aShdrs[i].sh_info = 0;
1164 pSymFile->aShdrs[i].sh_addralign = 1;
1165 pSymFile->aShdrs[i].sh_entsize = 0;
1166 i++;
1167
1168 Assert(i == RT_ELEMENTS(pSymFile->aShdrs));
1169
1170# if defined(IEMNATIVE_USE_GDB_JIT_ET_DYN)
1171 /*
1172 * The program headers:
1173 */
1174 /* Everything in a single LOAD segment: */
1175 i = 0;
1176 pSymFile->aPhdrs[i].p_type = PT_LOAD;
1177 pSymFile->aPhdrs[i].p_flags = PF_X | PF_R;
1178 pSymFile->aPhdrs[i].p_offset
1179 = pSymFile->aPhdrs[i].p_vaddr
1180 = pSymFile->aPhdrs[i].p_paddr = 0;
1181 pSymFile->aPhdrs[i].p_filesz /* Size of segment in file. */
1182 = pSymFile->aPhdrs[i].p_memsz = pExecMemAllocator->cbChunk - offSymFileInChunk;
1183 pSymFile->aPhdrs[i].p_align = HOST_PAGE_SIZE;
1184 i++;
1185 /* The .dynamic segment. */
1186 pSymFile->aPhdrs[i].p_type = PT_DYNAMIC;
1187 pSymFile->aPhdrs[i].p_flags = PF_R;
1188 pSymFile->aPhdrs[i].p_offset
1189 = pSymFile->aPhdrs[i].p_vaddr
1190 = pSymFile->aPhdrs[i].p_paddr = RT_UOFFSETOF(GDBJITSYMFILE, aDyn);
1191 pSymFile->aPhdrs[i].p_filesz /* Size of segment in file. */
1192 = pSymFile->aPhdrs[i].p_memsz = sizeof(pSymFile->aDyn);
1193 pSymFile->aPhdrs[i].p_align = sizeof(pSymFile->aDyn[0].d_tag);
1194 i++;
1195
1196 Assert(i == RT_ELEMENTS(pSymFile->aPhdrs));
1197
1198 /*
1199 * The dynamic section:
1200 */
1201 i = 0;
1202 pSymFile->aDyn[i].d_tag = DT_SONAME;
1203 pSymFile->aDyn[i].d_un.d_val = offStrTab;
1204 APPEND_STR_FMT("iem-exec-chunk-%u-%u", pVCpu->idCpu, idxChunk);
1205 i++;
1206 pSymFile->aDyn[i].d_tag = DT_STRTAB;
1207 pSymFile->aDyn[i].d_un.d_ptr = RT_UOFFSETOF(GDBJITSYMFILE, szzStrTab);
1208 i++;
1209 pSymFile->aDyn[i].d_tag = DT_STRSZ;
1210 pSymFile->aDyn[i].d_un.d_val = sizeof(pSymFile->szzStrTab);
1211 i++;
1212 pSymFile->aDyn[i].d_tag = DT_SYMTAB;
1213 pSymFile->aDyn[i].d_un.d_ptr = RT_UOFFSETOF(GDBJITSYMFILE, aDynSyms);
1214 i++;
1215 pSymFile->aDyn[i].d_tag = DT_SYMENT;
1216 pSymFile->aDyn[i].d_un.d_val = sizeof(pSymFile->aDynSyms[0]);
1217 i++;
1218 pSymFile->aDyn[i].d_tag = DT_NULL;
1219 i++;
1220 Assert(i == RT_ELEMENTS(pSymFile->aDyn));
1221# endif /* IEMNATIVE_USE_GDB_JIT_ET_DYN */
1222
1223 /*
1224 * Symbol tables:
1225 */
1226 /** @todo gdb doesn't seem to really like this ... */
1227 i = 0;
1228 pSymFile->aSymbols[i].st_name = 0;
1229 pSymFile->aSymbols[i].st_shndx = SHN_UNDEF;
1230 pSymFile->aSymbols[i].st_value = 0;
1231 pSymFile->aSymbols[i].st_size = 0;
1232 pSymFile->aSymbols[i].st_info = ELF64_ST_INFO(STB_LOCAL, STT_NOTYPE);
1233 pSymFile->aSymbols[i].st_other = 0 /* STV_DEFAULT */;
1234# ifdef IEMNATIVE_USE_GDB_JIT_ET_DYN
1235 pSymFile->aDynSyms[0] = pSymFile->aSymbols[i];
1236# endif
1237 i++;
1238
1239 pSymFile->aSymbols[i].st_name = 0;
1240 pSymFile->aSymbols[i].st_shndx = SHN_ABS;
1241 pSymFile->aSymbols[i].st_value = 0;
1242 pSymFile->aSymbols[i].st_size = 0;
1243 pSymFile->aSymbols[i].st_info = ELF64_ST_INFO(STB_LOCAL, STT_FILE);
1244 pSymFile->aSymbols[i].st_other = 0 /* STV_DEFAULT */;
1245 i++;
1246
1247 pSymFile->aSymbols[i].st_name = offStrTab;
1248 APPEND_STR_FMT("iem_exec_chunk_%u_%u", pVCpu->idCpu, idxChunk);
1249# if 0
1250 pSymFile->aSymbols[i].st_shndx = iShText;
1251 pSymFile->aSymbols[i].st_value = 0;
1252# else
1253 pSymFile->aSymbols[i].st_shndx = SHN_ABS;
1254 pSymFile->aSymbols[i].st_value = (uintptr_t)(pSymFile + 1);
1255# endif
1256 pSymFile->aSymbols[i].st_size = pSymFile->aShdrs[iShText].sh_size;
1257 pSymFile->aSymbols[i].st_info = ELF64_ST_INFO(STB_GLOBAL, STT_FUNC);
1258 pSymFile->aSymbols[i].st_other = 0 /* STV_DEFAULT */;
1259# ifdef IEMNATIVE_USE_GDB_JIT_ET_DYN
1260 pSymFile->aDynSyms[1] = pSymFile->aSymbols[i];
1261 pSymFile->aDynSyms[1].st_value = (uintptr_t)(pSymFile + 1);
1262# endif
1263 i++;
1264
1265 Assert(i == RT_ELEMENTS(pSymFile->aSymbols));
1266 Assert(offStrTab < sizeof(pSymFile->szzStrTab));
1267
1268 /*
1269 * The GDB JIT entry and informing GDB.
1270 */
1271 pEhFrame->GdbJitEntry.pbSymFile = (uint8_t *)pSymFile;
1272# if 1
1273 pEhFrame->GdbJitEntry.cbSymFile = pExecMemAllocator->cbChunk - ((uintptr_t)pSymFile - (uintptr_t)pvChunk);
1274# else
1275 pEhFrame->GdbJitEntry.cbSymFile = sizeof(GDBJITSYMFILE);
1276# endif
1277
1278 RTOnce(&g_IemNativeGdbJitOnce, iemNativeGdbJitInitOnce, NULL);
1279 RTCritSectEnter(&g_IemNativeGdbJitLock);
1280 pEhFrame->GdbJitEntry.pNext = NULL;
1281 pEhFrame->GdbJitEntry.pPrev = __jit_debug_descriptor.pTail;
1282 if (__jit_debug_descriptor.pTail)
1283 __jit_debug_descriptor.pTail->pNext = &pEhFrame->GdbJitEntry;
1284 else
1285 __jit_debug_descriptor.pHead = &pEhFrame->GdbJitEntry;
1286 __jit_debug_descriptor.pTail = &pEhFrame->GdbJitEntry;
1287 __jit_debug_descriptor.pRelevant = &pEhFrame->GdbJitEntry;
1288
1289 /* Notify GDB: */
1290 __jit_debug_descriptor.enmAction = kGdbJitaction_Register;
1291 __jit_debug_register_code();
1292 __jit_debug_descriptor.enmAction = kGdbJitaction_NoAction;
1293 RTCritSectLeave(&g_IemNativeGdbJitLock);
1294
1295# else /* !IEMNATIVE_USE_GDB_JIT */
1296 RT_NOREF(pVCpu);
1297# endif /* !IEMNATIVE_USE_GDB_JIT */
1298
1299 return VINF_SUCCESS;
1300}
1301
1302# endif /* !RT_OS_WINDOWS */
1303#endif /* IN_RING3 */
1304
1305
1306/**
1307 * Adds another chunk to the executable memory allocator.
1308 *
1309 * This is used by the init code for the initial allocation and later by the
1310 * regular allocator function when it's out of memory.
1311 */
1312static int iemExecMemAllocatorGrow(PVMCPUCC pVCpu, PIEMEXECMEMALLOCATOR pExecMemAllocator)
1313{
1314 /* Check that we've room for growth. */
1315 uint32_t const idxChunk = pExecMemAllocator->cChunks;
1316 AssertLogRelReturn(idxChunk < pExecMemAllocator->cMaxChunks, VERR_OUT_OF_RESOURCES);
1317
1318 /* Allocate a chunk. */
1319#ifdef RT_OS_DARWIN
1320 void *pvChunk = RTMemPageAllocEx(pExecMemAllocator->cbChunk, 0);
1321#else
1322 void *pvChunk = RTMemPageAllocEx(pExecMemAllocator->cbChunk, RTMEMPAGEALLOC_F_EXECUTABLE);
1323#endif
1324 AssertLogRelReturn(pvChunk, VERR_NO_EXEC_MEMORY);
1325
1326#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
1327 int rc = VINF_SUCCESS;
1328#else
1329 /* Initialize the heap for the chunk. */
1330 RTHEAPSIMPLE hHeap = NIL_RTHEAPSIMPLE;
1331 int rc = RTHeapSimpleInit(&hHeap, pvChunk, pExecMemAllocator->cbChunk);
1332 AssertRC(rc);
1333 if (RT_SUCCESS(rc))
1334 {
1335 /*
1336 * We want the memory to be aligned on a 64 byte boundary, so the first time thru
1337 * here we do some exploratory allocations to see how we can achieve this.
1338 * On subsequent runs we only make an initial adjustment allocation, if
1339 * necessary.
1340 *
1341 * Since we own the heap implementation, we know that the internal block
1342 * header is 32 bytes in size for 64-bit systems (see RTHEAPSIMPLEBLOCK),
1343 * so all we need to do wrt allocation size adjustments is to add 32 bytes
1344 * to the size, align up by 64 bytes, and subtract 32 bytes.
1345 *
1346 * The heap anchor block is 8 * sizeof(void *) (see RTHEAPSIMPLEINTERNAL),
1347 * which means 64 bytes on a 64-bit system, so we need to make a 64 byte
1348 * allocation to force subsequent allocations to return 64 byte aligned
1349 * user areas.
1350 */
1351 if (!pExecMemAllocator->cbHeapBlockHdr)
1352 {
1353 pExecMemAllocator->cbHeapBlockHdr = sizeof(void *) * 4; /* See RTHEAPSIMPLEBLOCK. */
1354 pExecMemAllocator->cbHeapAlignTweak = 64;
1355 pExecMemAllocator->pvAlignTweak = RTHeapSimpleAlloc(hHeap, pExecMemAllocator->cbHeapAlignTweak,
1356 32 /*cbAlignment*/);
1357 AssertStmt(pExecMemAllocator->pvAlignTweak, rc = VERR_INTERNAL_ERROR_2);
1358
1359 void *pvTest1 = RTHeapSimpleAlloc(hHeap,
1360 RT_ALIGN_32(256 + pExecMemAllocator->cbHeapBlockHdr, 64)
1361 - pExecMemAllocator->cbHeapBlockHdr, 32 /*cbAlignment*/);
1362 AssertStmt(pvTest1, rc = VERR_INTERNAL_ERROR_2);
1363 AssertStmt(!((uintptr_t)pvTest1 & 63), rc = VERR_INTERNAL_ERROR_3);
1364
1365 void *pvTest2 = RTHeapSimpleAlloc(hHeap,
1366 RT_ALIGN_32(687 + pExecMemAllocator->cbHeapBlockHdr, 64)
1367 - pExecMemAllocator->cbHeapBlockHdr, 32 /*cbAlignment*/);
1368 AssertStmt(pvTest2, rc = VERR_INTERNAL_ERROR_2);
1369 AssertStmt(!((uintptr_t)pvTest2 & 63), rc = VERR_INTERNAL_ERROR_3);
1370
1371 RTHeapSimpleFree(hHeap, pvTest2);
1372 RTHeapSimpleFree(hHeap, pvTest1);
1373 }
1374 else
1375 {
1376 pExecMemAllocator->pvAlignTweak = RTHeapSimpleAlloc(hHeap, pExecMemAllocator->cbHeapAlignTweak, 32 /*cbAlignment*/);
1377 AssertStmt(pExecMemAllocator->pvAlignTweak, rc = VERR_INTERNAL_ERROR_4);
1378 }
1379 if (RT_SUCCESS(rc))
1380#endif /* !IEMEXECMEM_USE_ALT_SUB_ALLOCATOR */
1381 {
1382 /*
1383 * Add the chunk.
1384 *
1385 * This must be done before the unwind init so windows can allocate
1386 * memory from the chunk when using the alternative sub-allocator.
1387 */
1388 pExecMemAllocator->aChunks[idxChunk].pvChunk = pvChunk;
1389#ifdef IN_RING3
1390 pExecMemAllocator->aChunks[idxChunk].pvUnwindInfo = NULL;
1391#endif
1392#ifndef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
1393 pExecMemAllocator->aChunks[idxChunk].hHeap = hHeap;
1394#else
1395 pExecMemAllocator->aChunks[idxChunk].cFreeUnits = pExecMemAllocator->cUnitsPerChunk;
1396 pExecMemAllocator->aChunks[idxChunk].idxFreeHint = 0;
1397 memset(&pExecMemAllocator->pbmAlloc[pExecMemAllocator->cBitmapElementsPerChunk * idxChunk],
1398 0, sizeof(pExecMemAllocator->pbmAlloc[0]) * pExecMemAllocator->cBitmapElementsPerChunk);
1399#endif
1400
1401 pExecMemAllocator->cChunks = idxChunk + 1;
1402 pExecMemAllocator->idxChunkHint = idxChunk;
1403
1404#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
1405 pExecMemAllocator->cbTotal += pExecMemAllocator->cbChunk;
1406 pExecMemAllocator->cbFree += pExecMemAllocator->cbChunk;
1407#else
1408 size_t const cbFree = RTHeapSimpleGetFreeSize(hHeap);
1409 pExecMemAllocator->cbTotal += cbFree;
1410 pExecMemAllocator->cbFree += cbFree;
1411#endif
1412
1413#ifdef IN_RING3
1414 /*
1415 * Initialize the unwind information (this cannot really fail atm).
1416 * (This sets pvUnwindInfo.)
1417 */
1418 rc = iemExecMemAllocatorInitAndRegisterUnwindInfoForChunk(pVCpu, pExecMemAllocator, pvChunk, idxChunk);
1419 if (RT_SUCCESS(rc))
1420#endif
1421 {
1422 return VINF_SUCCESS;
1423 }
1424
1425#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
1426 /* Just in case the impossible happens, undo the above: */
1427 pExecMemAllocator->cbTotal -= pExecMemAllocator->cbChunk;
1428 pExecMemAllocator->cbFree -= pExecMemAllocator->aChunks[idxChunk].cFreeUnits << IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT;
1429 pExecMemAllocator->cChunks = idxChunk;
1430 memset(&pExecMemAllocator->pbmAlloc[pExecMemAllocator->cBitmapElementsPerChunk * idxChunk],
1431 0xff, sizeof(pExecMemAllocator->pbmAlloc[0]) * pExecMemAllocator->cBitmapElementsPerChunk);
1432 pExecMemAllocator->aChunks[idxChunk].pvChunk = NULL;
1433 pExecMemAllocator->aChunks[idxChunk].cFreeUnits = 0;
1434#endif
1435 }
1436#ifndef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
1437 }
1438#endif
1439 RTMemPageFree(pvChunk, pExecMemAllocator->cbChunk);
1440 RT_NOREF(pVCpu);
1441 return rc;
1442}
1443
1444
1445/**
1446 * Initializes the executable memory allocator for native recompilation on the
1447 * calling EMT.
1448 *
1449 * @returns VBox status code.
1450 * @param pVCpu The cross context virtual CPU structure of the calling
1451 * thread.
1452 * @param cbMax The max size of the allocator.
1453 * @param cbInitial The initial allocator size.
1454 * @param cbChunk The chunk size, 0 or UINT32_MAX for default (@a cbMax
1455 * dependent).
1456 */
1457int iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk)
1458{
1459 /*
1460 * Validate input.
1461 */
1462 AssertLogRelMsgReturn(cbMax >= _1M && cbMax <= _4G+_4G, ("cbMax=%RU64 (%RX64)\n", cbMax, cbMax), VERR_OUT_OF_RANGE);
1463 AssertReturn(cbInitial <= cbMax, VERR_OUT_OF_RANGE);
1464 AssertLogRelMsgReturn( cbChunk == UINT32_MAX
1465 || cbChunk == 0
1466 || ( RT_IS_POWER_OF_TWO(cbChunk)
1467 && cbChunk >= _1M
1468 && cbChunk <= _256M
1469 && cbChunk <= cbMax),
1470 ("cbChunk=%RU32 (%RX32) cbMax=%RU64\n", cbChunk, cbChunk, cbMax),
1471 VERR_OUT_OF_RANGE);
1472
1473 /*
1474 * Adjust/figure out the chunk size.
1475 */
1476 if (cbChunk == 0 || cbChunk == UINT32_MAX)
1477 {
1478 if (cbMax >= _256M)
1479 cbChunk = _64M;
1480 else
1481 {
1482 if (cbMax < _16M)
1483 cbChunk = cbMax >= _4M ? _4M : (uint32_t)cbMax;
1484 else
1485 cbChunk = (uint32_t)cbMax / 4;
1486 if (!RT_IS_POWER_OF_TWO(cbChunk))
1487 cbChunk = RT_BIT_32(ASMBitLastSetU32(cbChunk));
1488 }
1489 }
1490
1491 if (cbChunk > cbMax)
1492 cbMax = cbChunk;
1493 else
1494 cbMax = (cbMax - 1 + cbChunk) / cbChunk * cbChunk;
1495 uint32_t const cMaxChunks = (uint32_t)(cbMax / cbChunk);
1496 AssertLogRelReturn((uint64_t)cMaxChunks * cbChunk == cbMax, VERR_INTERNAL_ERROR_3);
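    /*
     * For example: a cbMax of 64 MiB with cbChunk = 0 resolves to a 16 MiB
     * chunk size (64 MiB / 4, already a power of two) and cMaxChunks = 4,
     * while a cbMax of 512 MiB picks the 64 MiB default chunk size and
     * gives cMaxChunks = 8.
     */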
1497
1498 /*
1499 * Allocate and initialize the allocator instance.
1500 */
1501 size_t cbNeeded = RT_UOFFSETOF_DYN(IEMEXECMEMALLOCATOR, aChunks[cMaxChunks]);
1502#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
1503 size_t const offBitmaps = RT_ALIGN_Z(cbNeeded, RT_CACHELINE_SIZE);
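    /* One bit per allocation unit; the +3 converts the unit count from bits to bytes (8 bits per byte). */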
1504 size_t const cbBitmap = cbChunk >> (IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT + 3);
1505 cbNeeded += cbBitmap * cMaxChunks;
1506 AssertCompile(IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT <= 10);
1507 Assert(cbChunk > RT_BIT_32(IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT + 3));
1508#endif
1509#if defined(IN_RING3) && !defined(RT_OS_WINDOWS)
1510 size_t const offEhFrames = RT_ALIGN_Z(cbNeeded, RT_CACHELINE_SIZE);
1511 cbNeeded += sizeof(IEMEXECMEMCHUNKEHFRAME) * cMaxChunks;
1512#endif
1513 PIEMEXECMEMALLOCATOR pExecMemAllocator = (PIEMEXECMEMALLOCATOR)RTMemAllocZ(cbNeeded);
1514 AssertLogRelMsgReturn(pExecMemAllocator, ("cbNeeded=%zx cMaxChunks=%#x cbChunk=%#x\n", cbNeeded, cMaxChunks, cbChunk),
1515 VERR_NO_MEMORY);
1516 pExecMemAllocator->uMagic = IEMEXECMEMALLOCATOR_MAGIC;
1517 pExecMemAllocator->cbChunk = cbChunk;
1518 pExecMemAllocator->cMaxChunks = cMaxChunks;
1519 pExecMemAllocator->cChunks = 0;
1520 pExecMemAllocator->idxChunkHint = 0;
1521 pExecMemAllocator->cAllocations = 0;
1522 pExecMemAllocator->cbTotal = 0;
1523 pExecMemAllocator->cbFree = 0;
1524 pExecMemAllocator->cbAllocated = 0;
1525#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
1526 pExecMemAllocator->pbmAlloc = (uint64_t *)((uintptr_t)pExecMemAllocator + offBitmaps);
1527 pExecMemAllocator->cUnitsPerChunk = cbChunk >> IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT;
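    /* The allocation bitmap is stored as 64-bit words, hence the extra +6 shift (2^6 bits per word). */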
1528 pExecMemAllocator->cBitmapElementsPerChunk = cbChunk >> (IEMEXECMEM_ALT_SUB_ALLOC_UNIT_SHIFT + 6);
1529 memset(pExecMemAllocator->pbmAlloc, 0xff, cbBitmap); /* Mark everything as allocated. Clear when chunks are added. */
1530#endif
1531#if defined(IN_RING3) && !defined(RT_OS_WINDOWS)
1532 pExecMemAllocator->paEhFrames = (PIEMEXECMEMCHUNKEHFRAME)((uintptr_t)pExecMemAllocator + offEhFrames);
1533#endif
1534 for (uint32_t i = 0; i < cMaxChunks; i++)
1535 {
1536#ifdef IEMEXECMEM_USE_ALT_SUB_ALLOCATOR
1537 pExecMemAllocator->aChunks[i].cFreeUnits = 0;
1538 pExecMemAllocator->aChunks[i].idxFreeHint = 0;
1539#else
1540 pExecMemAllocator->aChunks[i].hHeap = NIL_RTHEAPSIMPLE;
1541#endif
1542 pExecMemAllocator->aChunks[i].pvChunk = NULL;
1543#ifdef IN_RING0
1544 pExecMemAllocator->aChunks[i].hMemObj = NIL_RTR0MEMOBJ;
1545#else
1546 pExecMemAllocator->aChunks[i].pvUnwindInfo = NULL;
1547#endif
1548 }
1549 pVCpu->iem.s.pExecMemAllocatorR3 = pExecMemAllocator;
1550
1551 /*
1552 * Do the initial allocations.
1553 */
1554 while ((uint64_t)pExecMemAllocator->cChunks * pExecMemAllocator->cbChunk < cbInitial)
1555 {
1556 int rc = iemExecMemAllocatorGrow(pVCpu, pExecMemAllocator);
1557 AssertLogRelRCReturn(rc, rc);
1558 }
1559
1560 pExecMemAllocator->idxChunkHint = 0;
1561
1562 return VINF_SUCCESS;
1563}
1564
1565
1566/*********************************************************************************************************************************
1567* Native Recompilation *
1568*********************************************************************************************************************************/
1569
1570
1571/**
1572 * Used by TB code when encountering a non-zero status or rcPassUp after a call.
1573 */
1574IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecStatusCodeFiddling,(PVMCPUCC pVCpu, int rc, uint8_t idxInstr))
1575{
1576 pVCpu->iem.s.cInstructions += idxInstr;
1577 return VBOXSTRICTRC_VAL(iemExecStatusCodeFiddling(pVCpu, rc == VINF_IEM_REEXEC_BREAK ? VINF_SUCCESS : rc));
1578}
1579
1580
1581/**
1582 * Used by TB code when it wants to raise a \#DE.
1583 */
1584IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseDe,(PVMCPUCC pVCpu))
1585{
1586 iemRaiseDivideErrorJmp(pVCpu);
1587#ifndef _MSC_VER
1588 return VINF_IEM_RAISED_XCPT; /* not reached */
1589#endif
1590}
1591
1592
1593/**
1594 * Used by TB code when it wants to raise a \#UD.
1595 */
1596IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseUd,(PVMCPUCC pVCpu))
1597{
1598 iemRaiseUndefinedOpcodeJmp(pVCpu);
1599#ifndef _MSC_VER
1600 return VINF_IEM_RAISED_XCPT; /* not reached */
1601#endif
1602}
1603
1604
1605/**
1606 * Used by TB code when it wants to raise an SSE related \#UD or \#NM.
1607 *
1608 * See IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT.
1609 */
1610IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseSseRelated,(PVMCPUCC pVCpu))
1611{
1612 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
1613 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
1614 iemRaiseUndefinedOpcodeJmp(pVCpu);
1615 else
1616 iemRaiseDeviceNotAvailableJmp(pVCpu);
1617#ifndef _MSC_VER
1618 return VINF_IEM_RAISED_XCPT; /* not reached */
1619#endif
1620}
1621
1622
1623/**
1624 * Used by TB code when it wants to raise an AVX related \#UD or \#NM.
1625 *
1626 * See IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT.
1627 */
1628IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseAvxRelated,(PVMCPUCC pVCpu))
1629{
1630 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE)
1631 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
1632 iemRaiseUndefinedOpcodeJmp(pVCpu);
1633 else
1634 iemRaiseDeviceNotAvailableJmp(pVCpu);
1635#ifndef _MSC_VER
1636 return VINF_IEM_RAISED_XCPT; /* not reached */
1637#endif
1638}
1639
1640
1641/**
1642 * Used by TB code when it wants to raise a \#NM.
1643 */
1644IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseNm,(PVMCPUCC pVCpu))
1645{
1646 iemRaiseDeviceNotAvailableJmp(pVCpu);
1647#ifndef _MSC_VER
1648 return VINF_IEM_RAISED_XCPT; /* not reached */
1649#endif
1650}
1651
1652
1653/**
1654 * Used by TB code when it wants to raise a \#GP(0).
1655 */
1656IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseGp0,(PVMCPUCC pVCpu))
1657{
1658 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1659#ifndef _MSC_VER
1660 return VINF_IEM_RAISED_XCPT; /* not reached */
1661#endif
1662}
1663
1664
1665/**
1666 * Used by TB code when it wants to raise a \#MF.
1667 */
1668IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseMf,(PVMCPUCC pVCpu))
1669{
1670 iemRaiseMathFaultJmp(pVCpu);
1671#ifndef _MSC_VER
1672 return VINF_IEM_RAISED_XCPT; /* not reached */
1673#endif
1674}
1675
1676
1677/**
1678 * Used by TB code when it wants to raise a \#XF.
1679 */
1680IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpExecRaiseXf,(PVMCPUCC pVCpu))
1681{
1682 iemRaiseSimdFpExceptionJmp(pVCpu);
1683#ifndef _MSC_VER
1684 return VINF_IEM_RAISED_XCPT; /* not reached */
1685#endif
1686}
1687
1688
1689/**
1690 * Used by TB code when detecting opcode changes.
1691 * @see iemThreadedFuncWorkerObsoleteTb
1692 */
1693IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpObsoleteTb,(PVMCPUCC pVCpu))
1694{
1695 /* We set fSafeToFree to false because we're being called in the context
1696 of a TB callback function, which for native TBs means we cannot release
1697 the executable memory until we've returned our way back to iemTbExec, as
1698 that return path goes via the native code generated for the TB. */
1699 Log7(("TB obsolete: %p at %04x:%08RX64\n", pVCpu->iem.s.pCurTbR3, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1700 iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
1701 return VINF_IEM_REEXEC_BREAK;
1702}
1703
1704
1705/**
1706 * Used by TB code when we need to switch to a TB with CS.LIM checking.
1707 */
1708IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpNeedCsLimChecking,(PVMCPUCC pVCpu))
1709{
1710 Log7(("TB need CS.LIM: %p at %04x:%08RX64; offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n",
1711 pVCpu->iem.s.pCurTbR3, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
1712 (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.rip,
1713 pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base));
1714 STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking);
1715 return VINF_IEM_REEXEC_BREAK;
1716}
1717
1718
1719/**
1720 * Used by TB code when we missed a PC check after a branch.
1721 */
1722IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpCheckBranchMiss,(PVMCPUCC pVCpu))
1723{
1724 Log7(("TB jmp miss: %p at %04x:%08RX64; GCPhysWithOffset=%RGp, pbInstrBuf=%p\n",
1725 pVCpu->iem.s.pCurTbR3, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
1726 pVCpu->iem.s.GCPhysInstrBuf + pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base - pVCpu->iem.s.uInstrBufPc,
1727 pVCpu->iem.s.pbInstrBuf));
1728 STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses);
1729 return VINF_IEM_REEXEC_BREAK;
1730}
1731
1732
1733
1734/*********************************************************************************************************************************
1735* Helpers: Segmented memory fetches and stores. *
1736*********************************************************************************************************************************/
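/* Note: When the IEMNATIVE_WITH_TLB_LOOKUP_* options are enabled, the recompiled
         code presumably performs the TLB lookup inline and only calls these helpers
         on the slow path, which is why the *SafeJmp workers are used there; otherwise
         the full *Jmp workers are called. */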
1737
1738/**
1739 * Used by TB code to load unsigned 8-bit data w/ segmentation.
1740 */
1741IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
1742{
1743#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1744 return (uint64_t)iemMemFetchDataU8SafeJmp(pVCpu, iSegReg, GCPtrMem);
1745#else
1746 return (uint64_t)iemMemFetchDataU8Jmp(pVCpu, iSegReg, GCPtrMem);
1747#endif
1748}
1749
1750
1751/**
1752 * Used by TB code to load signed 8-bit data w/ segmentation, sign extending it
1753 * to 16 bits.
1754 */
1755IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU8_Sx_U16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
1756{
1757#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1758 return (uint64_t)(uint16_t)(int16_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, iSegReg, GCPtrMem);
1759#else
1760 return (uint64_t)(uint16_t)(int16_t)(int8_t)iemMemFetchDataU8Jmp(pVCpu, iSegReg, GCPtrMem);
1761#endif
1762}
1763
1764
1765/**
1766 * Used by TB code to load signed 8-bit data w/ segmentation, sign extending it
1767 * to 32 bits.
1768 */
1769IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU8_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
1770{
1771#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1772 return (uint64_t)(uint32_t)(int32_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, iSegReg, GCPtrMem);
1773#else
1774 return (uint64_t)(uint32_t)(int32_t)(int8_t)iemMemFetchDataU8Jmp(pVCpu, iSegReg, GCPtrMem);
1775#endif
1776}
1777
1778/**
1779 * Used by TB code to load signed 8-bit data w/ segmentation, sign extending it
1780 * to 64 bits.
1781 */
1782IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU8_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
1783{
1784#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1785 return (uint64_t)(int64_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, iSegReg, GCPtrMem);
1786#else
1787 return (uint64_t)(int64_t)(int8_t)iemMemFetchDataU8Jmp(pVCpu, iSegReg, GCPtrMem);
1788#endif
1789}
1790
1791
1792/**
1793 * Used by TB code to load unsigned 16-bit data w/ segmentation.
1794 */
1795IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
1796{
1797#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1798 return (uint64_t)iemMemFetchDataU16SafeJmp(pVCpu, iSegReg, GCPtrMem);
1799#else
1800 return (uint64_t)iemMemFetchDataU16Jmp(pVCpu, iSegReg, GCPtrMem);
1801#endif
1802}
1803
1804
1805/**
1806 * Used by TB code to load signed 16-bit data w/ segmentation, sign extending it
1807 * to 32 bits.
1808 */
1809IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU16_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
1810{
1811#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1812 return (uint64_t)(uint32_t)(int32_t)(int16_t)iemMemFetchDataU16SafeJmp(pVCpu, iSegReg, GCPtrMem);
1813#else
1814 return (uint64_t)(uint32_t)(int32_t)(int16_t)iemMemFetchDataU16Jmp(pVCpu, iSegReg, GCPtrMem);
1815#endif
1816}
1817
1818
1819/**
1820 * Used by TB code to load signed 16-bit data w/ segmentation, sign extending it
1821 * to 64 bits.
1822 */
1823IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU16_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
1824{
1825#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1826 return (uint64_t)(int64_t)(int16_t)iemMemFetchDataU16SafeJmp(pVCpu, iSegReg, GCPtrMem);
1827#else
1828 return (uint64_t)(int64_t)(int16_t)iemMemFetchDataU16Jmp(pVCpu, iSegReg, GCPtrMem);
1829#endif
1830}
1831
1832
1833/**
1834 * Used by TB code to load unsigned 32-bit data w/ segmentation.
1835 */
1836IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
1837{
1838#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1839 return (uint64_t)iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
1840#else
1841 return (uint64_t)iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
1842#endif
1843}
1844
1845
1846/**
1847 * Used by TB code to load signed 32-bit data w/ segmentation, sign extending it
1848 * to 64 bits.
1849 */
1850IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU32_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
1851{
1852#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1853 return (uint64_t)(int64_t)(int32_t)iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
1854#else
1855 return (uint64_t)(int64_t)(int32_t)iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
1856#endif
1857}
1858
1859
1860/**
1861 * Used by TB code to load unsigned 64-bit data w/ segmentation.
1862 */
1863IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFetchDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg))
1864{
1865#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1866 return iemMemFetchDataU64SafeJmp(pVCpu, iSegReg, GCPtrMem);
1867#else
1868 return iemMemFetchDataU64Jmp(pVCpu, iSegReg, GCPtrMem);
1869#endif
1870}
1871
1872
1873#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
1874/**
1875 * Used by TB code to load 128-bit data w/ segmentation.
1876 */
1877IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFetchDataU128AlignedSse,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT128U pu128Dst))
1878{
1879#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1880 iemMemFetchDataU128AlignedSseSafeJmp(pVCpu, pu128Dst, iSegReg, GCPtrMem);
1881#else
1882 iemMemFetchDataU128AlignedSseJmp(pVCpu, pu128Dst, iSegReg, GCPtrMem);
1883#endif
1884}
1885
1886
1887/**
1888 * Used by TB code to load 128-bit data w/ segmentation.
1889 */
1890IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFetchDataU128NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, PRTUINT128U pu128Dst))
1891{
1892#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
1893 iemMemFetchDataU128NoAcSafeJmp(pVCpu, pu128Dst, iSegReg, GCPtrMem);
1894#else
1895 iemMemFetchDataU128NoAcJmp(pVCpu, pu128Dst, iSegReg, GCPtrMem);
1896#endif
1897}
1898#endif
1899
1900
1901/**
1902 * Used by TB code to store unsigned 8-bit data w/ segmentation.
1903 */
1904IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint8_t u8Value))
1905{
1906#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
1907 iemMemStoreDataU8SafeJmp(pVCpu, iSegReg, GCPtrMem, u8Value);
1908#else
1909 iemMemStoreDataU8Jmp(pVCpu, iSegReg, GCPtrMem, u8Value);
1910#endif
1911}
1912
1913
1914/**
1915 * Used by TB code to store unsigned 16-bit data w/ segmentation.
1916 */
1917IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint16_t u16Value))
1918{
1919#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
1920 iemMemStoreDataU16SafeJmp(pVCpu, iSegReg, GCPtrMem, u16Value);
1921#else
1922 iemMemStoreDataU16Jmp(pVCpu, iSegReg, GCPtrMem, u16Value);
1923#endif
1924}
1925
1926
1927/**
1928 * Used by TB code to store unsigned 32-bit data w/ segmentation.
1929 */
1930IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint32_t u32Value))
1931{
1932#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
1933 iemMemStoreDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem, u32Value);
1934#else
1935 iemMemStoreDataU32Jmp(pVCpu, iSegReg, GCPtrMem, u32Value);
1936#endif
1937}
1938
1939
1940/**
1941 * Used by TB code to store unsigned 64-bit data w/ segmentation.
1942 */
1943IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemStoreDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t iSegReg, uint64_t u64Value))
1944{
1945#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
1946 iemMemStoreDataU64SafeJmp(pVCpu, iSegReg, GCPtrMem, u64Value);
1947#else
1948 iemMemStoreDataU64Jmp(pVCpu, iSegReg, GCPtrMem, u64Value);
1949#endif
1950}
1951
1952
1953
1954/**
1955 * Used by TB code to store an unsigned 16-bit value onto a generic stack.
1956 */
1957IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value))
1958{
1959#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
1960 iemMemStoreStackU16SafeJmp(pVCpu, GCPtrMem, u16Value);
1961#else
1962 iemMemStoreStackU16Jmp(pVCpu, GCPtrMem, u16Value);
1963#endif
1964}
1965
1966
1967/**
1968 * Used by TB code to store an unsigned 32-bit value onto a generic stack.
1969 */
1970IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
1971{
1972#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
1973 iemMemStoreStackU32SafeJmp(pVCpu, GCPtrMem, u32Value);
1974#else
1975 iemMemStoreStackU32Jmp(pVCpu, GCPtrMem, u32Value);
1976#endif
1977}
1978
1979
1980/**
1981 * Used by TB code to store a 32-bit selector value onto a generic stack.
1982 *
1983 * Intel CPUs don't write a whole dword here, hence the special function.
1984 */
1985IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU32SReg,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
1986{
1987#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
1988 iemMemStoreStackU32SRegSafeJmp(pVCpu, GCPtrMem, u32Value);
1989#else
1990 iemMemStoreStackU32SRegJmp(pVCpu, GCPtrMem, u32Value);
1991#endif
1992}
1993
1994
1995/**
1996 * Used by TB code to store an unsigned 64-bit value onto a generic stack.
1997 */
1998IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value))
1999{
2000#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
2001 iemMemStoreStackU64SafeJmp(pVCpu, GCPtrMem, u64Value);
2002#else
2003 iemMemStoreStackU64Jmp(pVCpu, GCPtrMem, u64Value);
2004#endif
2005}
2006
2007
2008/**
2009 * Used by TB code to fetch an unsigned 16-bit item off a generic stack.
2010 */
2011IEM_DECL_NATIVE_HLP_DEF(uint16_t, iemNativeHlpStackFetchU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2012{
2013#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
2014 return iemMemFetchStackU16SafeJmp(pVCpu, GCPtrMem);
2015#else
2016 return iemMemFetchStackU16Jmp(pVCpu, GCPtrMem);
2017#endif
2018}
2019
2020
2021/**
2022 * Used by TB code to fetch an unsigned 32-bit item off a generic stack.
2023 */
2024IEM_DECL_NATIVE_HLP_DEF(uint32_t, iemNativeHlpStackFetchU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2025{
2026#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
2027 return iemMemFetchStackU32SafeJmp(pVCpu, GCPtrMem);
2028#else
2029 return iemMemFetchStackU32Jmp(pVCpu, GCPtrMem);
2030#endif
2031}
2032
2033
2034/**
2035 * Used by TB code to fetch an unsigned 64-bit item off a generic stack.
2036 */
2037IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpStackFetchU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2038{
2039#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
2040 return iemMemFetchStackU64SafeJmp(pVCpu, GCPtrMem);
2041#else
2042 return iemMemFetchStackU64Jmp(pVCpu, GCPtrMem);
2043#endif
2044}
2045
2046
2047
2048/*********************************************************************************************************************************
2049* Helpers: Flat memory fetches and stores. *
2050*********************************************************************************************************************************/
2051
2052/**
2053 * Used by TB code to load unsigned 8-bit data w/ flat address.
2054 * @note Zero extending the value to 64-bit to simplify assembly.
2055 */
2056IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2057{
2058#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2059 return (uint64_t)iemMemFetchDataU8SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
2060#else
2061 return (uint64_t)iemMemFlatFetchDataU8Jmp(pVCpu, GCPtrMem);
2062#endif
2063}
2064
2065
2066/**
2067 * Used by TB code to load signed 8-bit data w/ flat address, sign extending it
2068 * to 16 bits.
2069 * @note Zero extending the value to 64-bit to simplify assembly.
2070 */
2071IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU8_Sx_U16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2072{
2073#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2074 return (uint64_t)(uint16_t)(int16_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
2075#else
2076 return (uint64_t)(uint16_t)(int16_t)(int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, GCPtrMem);
2077#endif
2078}
2079
2080
2081/**
2082 * Used by TB code to load signed 8-bit data w/ flat address, sign extending it
2083 * to 32 bits.
2084 * @note Zero extending the value to 64-bit to simplify assembly.
2085 */
2086IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU8_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2087{
2088#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2089 return (uint64_t)(uint32_t)(int32_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
2090#else
2091 return (uint64_t)(uint32_t)(int32_t)(int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, GCPtrMem);
2092#endif
2093}
2094
2095
2096/**
2097 * Used by TB code to load signed 8-bit data w/ flat address, sign extending it
2098 * to 64 bits.
2099 */
2100IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU8_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2101{
2102#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2103 return (uint64_t)(int64_t)(int8_t)iemMemFetchDataU8SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
2104#else
2105 return (uint64_t)(int64_t)(int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, GCPtrMem);
2106#endif
2107}
2108
2109
2110/**
2111 * Used by TB code to load unsigned 16-bit data w/ flat address.
2112 * @note Zero extending the value to 64-bit to simplify assembly.
2113 */
2114IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2115{
2116#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2117 return (uint64_t)iemMemFetchDataU16SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
2118#else
2119 return (uint64_t)iemMemFlatFetchDataU16Jmp(pVCpu, GCPtrMem);
2120#endif
2121}
2122
2123
2124/**
2125 * Used by TB code to load signed 16-bit data w/ flat address, sign extending it
2126 * to 32 bits.
2127 * @note Zero extending the value to 64-bit to simplify assembly.
2128 */
2129IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU16_Sx_U32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2130{
2131#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2132 return (uint64_t)(uint32_t)(int32_t)(int16_t)iemMemFetchDataU16SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
2133#else
2134 return (uint64_t)(uint32_t)(int32_t)(int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, GCPtrMem);
2135#endif
2136}
2137
2138
2139/**
2140 * Used by TB code to load signed 16-bit data w/ flat address, sign extending it
2141 * to 64 bits.
2142 * @note Zero extending the value to 64-bit to simplify assembly.
2143 */
2144IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU16_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2145{
2146#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2147 return (uint64_t)(int64_t)(int16_t)iemMemFetchDataU16SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
2148#else
2149 return (uint64_t)(int64_t)(int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, GCPtrMem);
2150#endif
2151}
2152
2153
2154/**
2155 * Used by TB code to load unsigned 32-bit data w/ flat address.
2156 * @note Zero extending the value to 64-bit to simplify assembly.
2157 */
2158IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2159{
2160#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2161 return (uint64_t)iemMemFetchDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
2162#else
2163 return (uint64_t)iemMemFlatFetchDataU32Jmp(pVCpu, GCPtrMem);
2164#endif
2165}
2166
2167
2168/**
2169 * Used by TB code to load signed 32-bit data w/ flat address, sign extending it
2170 * to 64 bits.
2171 * @note Zero extending the value to 64-bit to simplify assembly.
2172 */
2173IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU32_Sx_U64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2174{
2175#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2176 return (uint64_t)(int64_t)(int32_t)iemMemFetchDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
2177#else
2178 return (uint64_t)(int64_t)(int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, GCPtrMem);
2179#endif
2180}
2181
2182
2183/**
2184 * Used by TB code to load unsigned 64-bit data w/ flat address.
2185 */
2186IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpMemFlatFetchDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2187{
2188#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2189 return iemMemFetchDataU64SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
2190#else
2191 return iemMemFlatFetchDataU64Jmp(pVCpu, GCPtrMem);
2192#endif
2193}
2194
2195
2196#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
2197/**
2198 * Used by TB code to load unsigned 128-bit data w/ flat address.
2199 */
2200IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatFetchDataU128AlignedSse,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT128U pu128Dst))
2201{
2202#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2203 return iemMemFetchDataU128AlignedSseSafeJmp(pVCpu, pu128Dst, UINT8_MAX, GCPtrMem);
2204#else
2205 return iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, pu128Dst, UINT8_MAX, GCPtrMem);
2206#endif
2207}
2208
2209
2210/**
2211 * Used by TB code to load unsigned 128-bit data w/ flat address.
2212 */
2213IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatFetchDataU128NoAc,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTUINT128U pu128Dst))
2214{
2215#ifdef IEMNATIVE_WITH_TLB_LOOKUP_FETCH
2216 return iemMemFetchDataU128NoAcSafeJmp(pVCpu, pu128Dst, UINT8_MAX, GCPtrMem);
2217#else
2218 return iemMemFlatFetchDataU128NoAcJmp(pVCpu, pu128Dst, UINT8_MAX, GCPtrMem);
2219#endif
2220}
2221#endif
2222
2223
2224/**
2225 * Used by TB code to store unsigned 8-bit data w/ flat address.
2226 */
2227IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU8,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint8_t u8Value))
2228{
2229#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
2230 iemMemStoreDataU8SafeJmp(pVCpu, UINT8_MAX, GCPtrMem, u8Value);
2231#else
2232 iemMemFlatStoreDataU8Jmp(pVCpu, GCPtrMem, u8Value);
2233#endif
2234}
2235
2236
2237/**
2238 * Used by TB code to store unsigned 16-bit data w/ flat address.
2239 */
2240IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value))
2241{
2242#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
2243 iemMemStoreDataU16SafeJmp(pVCpu, UINT8_MAX, GCPtrMem, u16Value);
2244#else
2245 iemMemFlatStoreDataU16Jmp(pVCpu, GCPtrMem, u16Value);
2246#endif
2247}
2248
2249
2250/**
2251 * Used by TB code to store unsigned 32-bit data w/ flat address.
2252 */
2253IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
2254{
2255#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
2256 iemMemStoreDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem, u32Value);
2257#else
2258 iemMemFlatStoreDataU32Jmp(pVCpu, GCPtrMem, u32Value);
2259#endif
2260}
2261
2262
2263/**
2264 * Used by TB code to store unsigned 64-bit data w/ flat address.
2265 */
2266IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemFlatStoreDataU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value))
2267{
2268#ifdef IEMNATIVE_WITH_TLB_LOOKUP_STORE
2269 iemMemStoreDataU64SafeJmp(pVCpu, UINT8_MAX, GCPtrMem, u64Value);
2270#else
2271 iemMemFlatStoreDataU64Jmp(pVCpu, GCPtrMem, u64Value);
2272#endif
2273}
2274
2275
2276
2277/**
2278 * Used by TB code to store an unsigned 16-bit value onto a flat stack.
2279 */
2280IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value))
2281{
2282#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
2283 iemMemStoreStackU16SafeJmp(pVCpu, GCPtrMem, u16Value);
2284#else
2285 iemMemFlatStoreStackU16Jmp(pVCpu, GCPtrMem, u16Value);
2286#endif
2287}
2288
2289
2290/**
2291 * Used by TB code to store an unsigned 32-bit value onto a flat stack.
2292 */
2293IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
2294{
2295#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
2296 iemMemStoreStackU32SafeJmp(pVCpu, GCPtrMem, u32Value);
2297#else
2298 iemMemFlatStoreStackU32Jmp(pVCpu, GCPtrMem, u32Value);
2299#endif
2300}
2301
2302
2303/**
2304 * Used by TB code to store a segment selector value onto a flat stack.
2305 *
2306 * Intel CPUs doesn't do write a whole dword, thus the special function.
2307 */
2308IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU32SReg,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
2309{
2310#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
2311 iemMemStoreStackU32SRegSafeJmp(pVCpu, GCPtrMem, u32Value);
2312#else
2313 iemMemFlatStoreStackU32SRegJmp(pVCpu, GCPtrMem, u32Value);
2314#endif
2315}
2316
2317
2318/**
2319 * Used by TB code to store an unsigned 64-bit value onto a flat stack.
2320 */
2321IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value))
2322{
2323#ifdef IEMNATIVE_WITH_TLB_LOOKUP_PUSH
2324 iemMemStoreStackU64SafeJmp(pVCpu, GCPtrMem, u64Value);
2325#else
2326 iemMemFlatStoreStackU64Jmp(pVCpu, GCPtrMem, u64Value);
2327#endif
2328}
2329
2330
2331/**
2332 * Used by TB code to fetch an unsigned 16-bit item off a flat stack.
2333 */
2334IEM_DECL_NATIVE_HLP_DEF(uint16_t, iemNativeHlpStackFlatFetchU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2335{
2336#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
2337 return iemMemFetchStackU16SafeJmp(pVCpu, GCPtrMem);
2338#else
2339 return iemMemFlatFetchStackU16Jmp(pVCpu, GCPtrMem);
2340#endif
2341}
2342
2343
2344/**
2345 * Used by TB code to fetch an unsigned 32-bit item off a flat stack.
2346 */
2347IEM_DECL_NATIVE_HLP_DEF(uint32_t, iemNativeHlpStackFlatFetchU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2348{
2349#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
2350 return iemMemFetchStackU32SafeJmp(pVCpu, GCPtrMem);
2351#else
2352 return iemMemFlatFetchStackU32Jmp(pVCpu, GCPtrMem);
2353#endif
2354}
2355
2356
2357/**
2358 * Used by TB code to fetch an unsigned 64-bit item off a flat stack.
2359 */
2360IEM_DECL_NATIVE_HLP_DEF(uint64_t, iemNativeHlpStackFlatFetchU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem))
2361{
2362#ifdef IEMNATIVE_WITH_TLB_LOOKUP_POP
2363 return iemMemFetchStackU64SafeJmp(pVCpu, GCPtrMem);
2364#else
2365 return iemMemFlatFetchStackU64Jmp(pVCpu, GCPtrMem);
2366#endif
2367}
2368
2369
2370
2371/*********************************************************************************************************************************
2372* Helpers: Segmented memory mapping. *
2373*********************************************************************************************************************************/
2374
2375/**
2376 * Used by TB code to map unsigned 8-bit data for atomic read-write w/
2377 * segmentation.
2378 */
2379IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemMapDataU8Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2380 RTGCPTR GCPtrMem, uint8_t iSegReg))
2381{
2382#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2383 return iemMemMapDataU8AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2384#else
2385 return iemMemMapDataU8AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2386#endif
2387}
2388
2389
2390/**
2391 * Used by TB code to map unsigned 8-bit data read-write w/ segmentation.
2392 */
2393IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemMapDataU8Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2394 RTGCPTR GCPtrMem, uint8_t iSegReg))
2395{
2396#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2397 return iemMemMapDataU8RwSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2398#else
2399 return iemMemMapDataU8RwJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2400#endif
2401}
2402
2403
2404/**
2405 * Used by TB code to map unsigned 8-bit data writeonly w/ segmentation.
2406 */
2407IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemMapDataU8Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2408 RTGCPTR GCPtrMem, uint8_t iSegReg))
2409{
2410#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2411 return iemMemMapDataU8WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2412#else
2413 return iemMemMapDataU8WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2414#endif
2415}
2416
2417
2418/**
2419 * Used by TB code to map unsigned 8-bit data readonly w/ segmentation.
2420 */
2421IEM_DECL_NATIVE_HLP_DEF(uint8_t const *, iemNativeHlpMemMapDataU8Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2422 RTGCPTR GCPtrMem, uint8_t iSegReg))
2423{
2424#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2425 return iemMemMapDataU8RoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2426#else
2427 return iemMemMapDataU8RoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2428#endif
2429}
2430
2431
2432/**
2433 * Used by TB code to map unsigned 16-bit data for atomic read-write w/
2434 * segmentation.
2435 */
2436IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemMapDataU16Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2437 RTGCPTR GCPtrMem, uint8_t iSegReg))
2438{
2439#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2440 return iemMemMapDataU16AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2441#else
2442 return iemMemMapDataU16AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2443#endif
2444}
2445
2446
2447/**
2448 * Used by TB code to map unsigned 16-bit data read-write w/ segmentation.
2449 */
2450IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemMapDataU16Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2451 RTGCPTR GCPtrMem, uint8_t iSegReg))
2452{
2453#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2454 return iemMemMapDataU16RwSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2455#else
2456 return iemMemMapDataU16RwJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2457#endif
2458}
2459
2460
2461/**
2462 * Used by TB code to map unsigned 16-bit data writeonly w/ segmentation.
2463 */
2464IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemMapDataU16Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2465 RTGCPTR GCPtrMem, uint8_t iSegReg))
2466{
2467#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2468 return iemMemMapDataU16WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2469#else
2470 return iemMemMapDataU16WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2471#endif
2472}
2473
2474
2475/**
2476 * Used by TB code to map unsigned 16-bit data readonly w/ segmentation.
2477 */
2478IEM_DECL_NATIVE_HLP_DEF(uint16_t const *, iemNativeHlpMemMapDataU16Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2479 RTGCPTR GCPtrMem, uint8_t iSegReg))
2480{
2481#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2482 return iemMemMapDataU16RoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2483#else
2484 return iemMemMapDataU16RoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2485#endif
2486}
2487
2488
2489/**
2490 * Used by TB code to map unsigned 32-bit data for atomic read-write w/
2491 * segmentation.
2492 */
2493IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemMapDataU32Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2494 RTGCPTR GCPtrMem, uint8_t iSegReg))
2495{
2496#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2497 return iemMemMapDataU32AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2498#else
2499 return iemMemMapDataU32AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2500#endif
2501}
2502
2503
2504/**
2505 * Used by TB code to map unsigned 32-bit data read-write w/ segmentation.
2506 */
2507IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemMapDataU32Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2508 RTGCPTR GCPtrMem, uint8_t iSegReg))
2509{
2510#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2511 return iemMemMapDataU32RwSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2512#else
2513 return iemMemMapDataU32RwJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2514#endif
2515}
2516
2517
2518/**
2519 * Used by TB code to map unsigned 32-bit data writeonly w/ segmentation.
2520 */
2521IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemMapDataU32Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2522 RTGCPTR GCPtrMem, uint8_t iSegReg))
2523{
2524#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2525 return iemMemMapDataU32WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2526#else
2527 return iemMemMapDataU32WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2528#endif
2529}
2530
2531
2532/**
2533 * Used by TB code to map unsigned 32-bit data readonly w/ segmentation.
2534 */
2535IEM_DECL_NATIVE_HLP_DEF(uint32_t const *, iemNativeHlpMemMapDataU32Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2536 RTGCPTR GCPtrMem, uint8_t iSegReg))
2537{
2538#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2539 return iemMemMapDataU32RoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2540#else
2541 return iemMemMapDataU32RoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2542#endif
2543}
2544
2545
2546/**
2547 * Used by TB code to map unsigned 64-bit data for atomic read-write w/
2548 * segmentation.
2549 */
2550IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemMapDataU64Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2551 RTGCPTR GCPtrMem, uint8_t iSegReg))
2552{
2553#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2554 return iemMemMapDataU64AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2555#else
2556 return iemMemMapDataU64AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2557#endif
2558}
2559
2560
2561/**
2562 * Used by TB code to map unsigned 64-bit data read-write w/ segmentation.
2563 */
2564IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemMapDataU64Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2565 RTGCPTR GCPtrMem, uint8_t iSegReg))
2566{
2567#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2568 return iemMemMapDataU64RwSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2569#else
2570 return iemMemMapDataU64RwJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2571#endif
2572}
2573
2574
2575/**
2576 * Used by TB code to map unsigned 64-bit data writeonly w/ segmentation.
2577 */
2578IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemMapDataU64Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2579 RTGCPTR GCPtrMem, uint8_t iSegReg))
2580{
2581#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2582 return iemMemMapDataU64WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2583#else
2584 return iemMemMapDataU64WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2585#endif
2586}
2587
2588
2589/**
2590 * Used by TB code to map unsigned 64-bit data readonly w/ segmentation.
2591 */
2592IEM_DECL_NATIVE_HLP_DEF(uint64_t const *, iemNativeHlpMemMapDataU64Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2593 RTGCPTR GCPtrMem, uint8_t iSegReg))
2594{
2595#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2596 return iemMemMapDataU64RoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2597#else
2598 return iemMemMapDataU64RoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2599#endif
2600}
2601
2602
2603/**
2604 * Used by TB code to map 80-bit float data writeonly w/ segmentation.
2605 */
2606IEM_DECL_NATIVE_HLP_DEF(RTFLOAT80U *, iemNativeHlpMemMapDataR80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2607 RTGCPTR GCPtrMem, uint8_t iSegReg))
2608{
2609#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2610 return iemMemMapDataR80WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2611#else
2612 return iemMemMapDataR80WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2613#endif
2614}
2615
2616
2617/**
2618 * Used by TB code to map 80-bit BCD data writeonly w/ segmentation.
2619 */
2620IEM_DECL_NATIVE_HLP_DEF(RTPBCD80U *, iemNativeHlpMemMapDataD80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2621 RTGCPTR GCPtrMem, uint8_t iSegReg))
2622{
2623#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2624 return iemMemMapDataD80WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2625#else
2626 return iemMemMapDataD80WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2627#endif
2628}
2629
2630
2631/**
2632 * Used by TB code to map unsigned 128-bit data for atomic read-write w/
2633 * segmentation.
2634 */
2635IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemMapDataU128Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2636 RTGCPTR GCPtrMem, uint8_t iSegReg))
2637{
2638#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2639 return iemMemMapDataU128AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2640#else
2641 return iemMemMapDataU128AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2642#endif
2643}
2644
2645
2646/**
2647 * Used by TB code to map unsigned 128-bit data read-write w/ segmentation.
2648 */
2649IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemMapDataU128Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2650 RTGCPTR GCPtrMem, uint8_t iSegReg))
2651{
2652#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2653 return iemMemMapDataU128RwSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2654#else
2655 return iemMemMapDataU128RwJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2656#endif
2657}
2658
2659
2660/**
2661 * Used by TB code to map unsigned 128-bit data writeonly w/ segmentation.
2662 */
2663IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemMapDataU128Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2664 RTGCPTR GCPtrMem, uint8_t iSegReg))
2665{
2666#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2667 return iemMemMapDataU128WoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2668#else
2669 return iemMemMapDataU128WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2670#endif
2671}
2672
2673
2674/**
2675 * Used by TB code to map unsigned 128-bit data readonly w/ segmentation.
2676 */
2677IEM_DECL_NATIVE_HLP_DEF(RTUINT128U const *, iemNativeHlpMemMapDataU128Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
2678 RTGCPTR GCPtrMem, uint8_t iSegReg))
2679{
2680#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2681 return iemMemMapDataU128RoSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2682#else
2683 return iemMemMapDataU128RoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
2684#endif
2685}
2686
2687
2688/*********************************************************************************************************************************
2689* Helpers: Flat memory mapping. *
2690*********************************************************************************************************************************/
2691
2692/**
2693 * Used by TB code to map unsigned 8-bit data for atomic read-write w/ flat
2694 * address.
2695 */
2696IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemFlatMapDataU8Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2697{
2698#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2699 return iemMemMapDataU8AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2700#else
2701 return iemMemFlatMapDataU8AtJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2702#endif
2703}
2704
2705
2706/**
2707 * Used by TB code to map unsigned 8-bit data read-write w/ flat address.
2708 */
2709IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemFlatMapDataU8Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2710{
2711#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2712 return iemMemMapDataU8RwSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2713#else
2714 return iemMemFlatMapDataU8RwJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2715#endif
2716}
2717
2718
2719/**
2720 * Used by TB code to map unsigned 8-bit data writeonly w/ flat address.
2721 */
2722IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemFlatMapDataU8Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2723{
2724#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2725 return iemMemMapDataU8WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2726#else
2727 return iemMemFlatMapDataU8WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2728#endif
2729}
2730
2731
2732/**
2733 * Used by TB code to map unsigned 8-bit data readonly w/ flat address.
2734 */
2735IEM_DECL_NATIVE_HLP_DEF(uint8_t const *, iemNativeHlpMemFlatMapDataU8Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2736{
2737#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2738 return iemMemMapDataU8RoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2739#else
2740 return iemMemFlatMapDataU8RoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2741#endif
2742}
2743
2744
2745/**
2746 * Used by TB code to map unsigned 16-bit data for atomic read-write w/ flat
2747 * address.
2748 */
2749IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemFlatMapDataU16Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2750{
2751#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2752 return iemMemMapDataU16AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2753#else
2754 return iemMemFlatMapDataU16AtJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2755#endif
2756}
2757
2758
2759/**
2760 * Used by TB code to map unsigned 16-bit data read-write w/ flat address.
2761 */
2762IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemFlatMapDataU16Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2763{
2764#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2765 return iemMemMapDataU16RwSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2766#else
2767 return iemMemFlatMapDataU16RwJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2768#endif
2769}
2770
2771
2772/**
2773 * Used by TB code to map unsigned 16-bit data writeonly w/ flat address.
2774 */
2775IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemFlatMapDataU16Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2776{
2777#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2778 return iemMemMapDataU16WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2779#else
2780 return iemMemFlatMapDataU16WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2781#endif
2782}
2783
2784
2785/**
2786 * Used by TB code to map unsigned 16-bit data readonly w/ flat address.
2787 */
2788IEM_DECL_NATIVE_HLP_DEF(uint16_t const *, iemNativeHlpMemFlatMapDataU16Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2789{
2790#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2791 return iemMemMapDataU16RoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2792#else
2793 return iemMemFlatMapDataU16RoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2794#endif
2795}
2796
2797
2798/**
2799 * Used by TB code to map unsigned 32-bit data for atomic read-write w/ flat
2800 * address.
2801 */
2802IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemFlatMapDataU32Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2803{
2804#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2805 return iemMemMapDataU32AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2806#else
2807 return iemMemFlatMapDataU32AtJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2808#endif
2809}
2810
2811
2812/**
2813 * Used by TB code to map unsigned 32-bit data read-write w/ flat address.
2814 */
2815IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemFlatMapDataU32Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2816{
2817#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2818 return iemMemMapDataU32RwSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2819#else
2820 return iemMemFlatMapDataU32RwJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2821#endif
2822}
2823
2824
2825/**
2826 * Used by TB code to map unsigned 32-bit data writeonly w/ flat address.
2827 */
2828IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemFlatMapDataU32Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2829{
2830#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2831 return iemMemMapDataU32WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2832#else
2833 return iemMemFlatMapDataU32WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2834#endif
2835}
2836
2837
2838/**
2839 * Used by TB code to map unsigned 32-bit data readonly w/ flat address.
2840 */
2841IEM_DECL_NATIVE_HLP_DEF(uint32_t const *, iemNativeHlpMemFlatMapDataU32Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2842{
2843#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2844 return iemMemMapDataU32RoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2845#else
2846 return iemMemFlatMapDataU32RoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2847#endif
2848}
2849
2850
2851/**
2852 * Used by TB code to map unsigned 64-bit data for atomic read-write w/ flat
2853 * address.
2854 */
2855IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemFlatMapDataU64Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2856{
2857#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2858 return iemMemMapDataU64AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2859#else
2860 return iemMemFlatMapDataU64AtJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2861#endif
2862}
2863
2864
2865/**
2866 * Used by TB code to map unsigned 64-bit data read-write w/ flat address.
2867 */
2868IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemFlatMapDataU64Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2869{
2870#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2871 return iemMemMapDataU64RwSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2872#else
2873 return iemMemFlatMapDataU64RwJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2874#endif
2875}
2876
2877
2878/**
2879 * Used by TB code to map unsigned 64-bit data writeonly w/ flat address.
2880 */
2881IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemFlatMapDataU64Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2882{
2883#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2884 return iemMemMapDataU64WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2885#else
2886 return iemMemFlatMapDataU64WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2887#endif
2888}
2889
2890
2891/**
2892 * Used by TB code to map unsigned 64-bit data readonly w/ flat address.
2893 */
2894IEM_DECL_NATIVE_HLP_DEF(uint64_t const *, iemNativeHlpMemFlatMapDataU64Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2895{
2896#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2897 return iemMemMapDataU64RoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2898#else
2899 return iemMemFlatMapDataU64RoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2900#endif
2901}
2902
2903
2904/**
2905 * Used by TB code to map 80-bit float data writeonly w/ flat address.
2906 */
2907IEM_DECL_NATIVE_HLP_DEF(RTFLOAT80U *, iemNativeHlpMemFlatMapDataR80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2908{
2909#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2910 return iemMemMapDataR80WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2911#else
2912 return iemMemFlatMapDataR80WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2913#endif
2914}
2915
2916
2917/**
2918 * Used by TB code to map 80-bit BCD data writeonly w/ flat address.
2919 */
2920IEM_DECL_NATIVE_HLP_DEF(RTPBCD80U *, iemNativeHlpMemFlatMapDataD80Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2921{
2922#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2923 return iemMemMapDataD80WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2924#else
2925 return iemMemFlatMapDataD80WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2926#endif
2927}
2928
2929
2930/**
2931 * Used by TB code to map unsigned 128-bit data for atomic read-write w/ flat
2932 * address.
2933 */
2934IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemFlatMapDataU128Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2935{
2936#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2937 return iemMemMapDataU128AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2938#else
2939 return iemMemFlatMapDataU128AtJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2940#endif
2941}
2942
2943
2944/**
2945 * Used by TB code to map unsigned 128-bit data read-write w/ flat address.
2946 */
2947IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemFlatMapDataU128Rw,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2948{
2949#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2950 return iemMemMapDataU128RwSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2951#else
2952 return iemMemFlatMapDataU128RwJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2953#endif
2954}
2955
2956
2957/**
2958 * Used by TB code to map unsigned 128-bit data writeonly w/ flat address.
2959 */
2960IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemFlatMapDataU128Wo,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2961{
2962#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2963 return iemMemMapDataU128WoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2964#else
2965 return iemMemFlatMapDataU128WoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2966#endif
2967}
2968
2969
2970/**
2971 * Used by TB code to map unsigned 128-bit data readonly w/ flat address.
2972 */
2973IEM_DECL_NATIVE_HLP_DEF(RTUINT128U const *, iemNativeHlpMemFlatMapDataU128Ro,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem))
2974{
2975#ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
2976 return iemMemMapDataU128RoSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
2977#else
2978 return iemMemFlatMapDataU128RoJmp(pVCpu, pbUnmapInfo, GCPtrMem);
2979#endif
2980}
2981
2982
2983/*********************************************************************************************************************************
2984* Helpers: Commit, rollback & unmap *
2985*********************************************************************************************************************************/
2986
2987/**
2988 * Used by TB code to commit and unmap an atomic read-write memory mapping.
2989 */
2990IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCommitAndUnmapAtomic,(PVMCPUCC pVCpu, uint8_t bUnmapInfo))
2991{
2992 return iemMemCommitAndUnmapAtSafeJmp(pVCpu, bUnmapInfo);
2993}
2994
2995
2996/**
2997 * Used by TB code to commit and unmap a read-write memory mapping.
2998 */
2999IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCommitAndUnmapRw,(PVMCPUCC pVCpu, uint8_t bUnmapInfo))
3000{
3001 return iemMemCommitAndUnmapRwSafeJmp(pVCpu, bUnmapInfo);
3002}
3003
3004
3005/**
3006 * Used by TB code to commit and unmap a write-only memory mapping.
3007 */
3008IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCommitAndUnmapWo,(PVMCPUCC pVCpu, uint8_t bUnmapInfo))
3009{
3010 return iemMemCommitAndUnmapWoSafeJmp(pVCpu, bUnmapInfo);
3011}
3012
3013
3014/**
3015 * Used by TB code to commit and unmap a read-only memory mapping.
3016 */
3017IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCommitAndUnmapRo,(PVMCPUCC pVCpu, uint8_t bUnmapInfo))
3018{
3019 return iemMemCommitAndUnmapRoSafeJmp(pVCpu, bUnmapInfo);
3020}
3021
3022
3023/**
3024 * Reinitializes the native recompiler state.
3025 *
3026 * Called before starting a new recompile job.
3027 */
3028static PIEMRECOMPILERSTATE iemNativeReInit(PIEMRECOMPILERSTATE pReNative, PCIEMTB pTb)
3029{
3030 pReNative->cLabels = 0;
3031 pReNative->bmLabelTypes = 0;
3032 pReNative->cFixups = 0;
3033#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
3034 pReNative->pDbgInfo->cEntries = 0;
3035#endif
3036 pReNative->pTbOrg = pTb;
3037 pReNative->cCondDepth = 0;
3038 pReNative->uCondSeqNo = 0;
3039 pReNative->uCheckIrqSeqNo = 0;
3040 pReNative->uTlbSeqNo = 0;
3041
3042#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
3043 pReNative->Core.offPc = 0;
3044 pReNative->Core.cInstrPcUpdateSkipped = 0;
3045#endif
3046#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
3047 pReNative->fSimdRaiseXcptChecksEmitted = 0;
3048#endif
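    /* Mark the fixed host registers, plus any register indexes beyond the actual
       host register count, as permanently allocated so they are never handed out. */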
3049 pReNative->Core.bmHstRegs = IEMNATIVE_REG_FIXED_MASK
3050#if IEMNATIVE_HST_GREG_COUNT < 32
3051 | ~(RT_BIT(IEMNATIVE_HST_GREG_COUNT) - 1U)
3052#endif
3053 ;
3054 pReNative->Core.bmHstRegsWithGstShadow = 0;
3055 pReNative->Core.bmGstRegShadows = 0;
3056 pReNative->Core.bmVars = 0;
3057 pReNative->Core.bmStack = 0;
3058 AssertCompile(sizeof(pReNative->Core.bmStack) * 8 == IEMNATIVE_FRAME_VAR_SLOTS); /* Must set reserved slots to 1 otherwise. */
3059 pReNative->Core.u64ArgVars = UINT64_MAX;
3060
3061 AssertCompile(RT_ELEMENTS(pReNative->aidxUniqueLabels) == 16);
3062 pReNative->aidxUniqueLabels[0] = UINT32_MAX;
3063 pReNative->aidxUniqueLabels[1] = UINT32_MAX;
3064 pReNative->aidxUniqueLabels[2] = UINT32_MAX;
3065 pReNative->aidxUniqueLabels[3] = UINT32_MAX;
3066 pReNative->aidxUniqueLabels[4] = UINT32_MAX;
3067 pReNative->aidxUniqueLabels[5] = UINT32_MAX;
3068 pReNative->aidxUniqueLabels[6] = UINT32_MAX;
3069 pReNative->aidxUniqueLabels[7] = UINT32_MAX;
3070 pReNative->aidxUniqueLabels[8] = UINT32_MAX;
3071 pReNative->aidxUniqueLabels[9] = UINT32_MAX;
3072 pReNative->aidxUniqueLabels[10] = UINT32_MAX;
3073 pReNative->aidxUniqueLabels[11] = UINT32_MAX;
3074 pReNative->aidxUniqueLabels[12] = UINT32_MAX;
3075 pReNative->aidxUniqueLabels[13] = UINT32_MAX;
3076 pReNative->aidxUniqueLabels[14] = UINT32_MAX;
3077 pReNative->aidxUniqueLabels[15] = UINT32_MAX;
3078
3079 /* Full host register reinit: */
3080 for (unsigned i = 0; i < RT_ELEMENTS(pReNative->Core.aHstRegs); i++)
3081 {
3082 pReNative->Core.aHstRegs[i].fGstRegShadows = 0;
3083 pReNative->Core.aHstRegs[i].enmWhat = kIemNativeWhat_Invalid;
3084 pReNative->Core.aHstRegs[i].idxVar = UINT8_MAX;
3085 }
3086
3087 uint32_t fRegs = IEMNATIVE_REG_FIXED_MASK
3088 & ~( RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU)
3089#ifdef IEMNATIVE_REG_FIXED_PCPUMCTX
3090 | RT_BIT_32(IEMNATIVE_REG_FIXED_PCPUMCTX)
3091#endif
3092#ifdef IEMNATIVE_REG_FIXED_TMP0
3093 | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0)
3094#endif
3095#ifdef IEMNATIVE_REG_FIXED_TMP1
3096 | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP1)
3097#endif
3098#ifdef IEMNATIVE_REG_FIXED_PC_DBG
3099 | RT_BIT_32(IEMNATIVE_REG_FIXED_PC_DBG)
3100#endif
3101 );
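    /* Mark the remaining fixed registers (those not given a specific purpose below) as reserved. */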
3102 for (uint32_t idxReg = ASMBitFirstSetU32(fRegs) - 1; fRegs != 0; idxReg = ASMBitFirstSetU32(fRegs) - 1)
3103 {
3104 fRegs &= ~RT_BIT_32(idxReg);
3105 pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_FixedReserved;
3106 }
3107
3108 pReNative->Core.aHstRegs[IEMNATIVE_REG_FIXED_PVMCPU].enmWhat = kIemNativeWhat_pVCpuFixed;
3109#ifdef IEMNATIVE_REG_FIXED_PCPUMCTX
3110 pReNative->Core.aHstRegs[IEMNATIVE_REG_FIXED_PCPUMCTX].enmWhat = kIemNativeWhat_pCtxFixed;
3111#endif
3112#ifdef IEMNATIVE_REG_FIXED_TMP0
3113 pReNative->Core.aHstRegs[IEMNATIVE_REG_FIXED_TMP0].enmWhat = kIemNativeWhat_FixedTmp;
3114#endif
3115#ifdef IEMNATIVE_REG_FIXED_TMP1
3116 pReNative->Core.aHstRegs[IEMNATIVE_REG_FIXED_TMP1].enmWhat = kIemNativeWhat_FixedTmp;
3117#endif
3118#ifdef IEMNATIVE_REG_FIXED_PC_DBG
3119 pReNative->Core.aHstRegs[IEMNATIVE_REG_FIXED_PC_DBG].enmWhat = kIemNativeWhat_PcShadow;
3120#endif
3121
3122#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
3123# ifdef RT_ARCH_ARM64
3124 /*
3125 * Arm64 only has 32 128-bit registers, so in order to support emulating 256-bit registers we statically
3126 * pair two real registers into one virtual register for now, leaving us with only 16 256-bit registers.
3127 * We always pair v0 with v1, v2 with v3, etc., so we mark the higher register of each pair as fixed here
3128 * during init and the register allocator assumes it will always be free when the lower one is picked.
3129 */
3130 uint32_t const fFixedAdditional = UINT32_C(0xaaaaaaaa);
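    /* That is, bits 1,3,5,...,31 are set, marking v1,v3,...,v31 (the upper register of each pair) as taken. */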
3131# else
3132 uint32_t const fFixedAdditional = 0;
3133# endif
3134
3135 pReNative->Core.bmHstSimdRegs = IEMNATIVE_SIMD_REG_FIXED_MASK
3136 | fFixedAdditional
3137# if IEMNATIVE_HST_SIMD_REG_COUNT < 32
3138 | ~(RT_BIT(IEMNATIVE_HST_SIMD_REG_COUNT) - 1U)
3139# endif
3140 ;
3141 pReNative->Core.bmHstSimdRegsWithGstShadow = 0;
3142 pReNative->Core.bmGstSimdRegShadows = 0;
3143 pReNative->Core.bmGstSimdRegShadowDirtyLo128 = 0;
3144 pReNative->Core.bmGstSimdRegShadowDirtyHi128 = 0;
3145
3146 /* Full host register reinit: */
3147 for (unsigned i = 0; i < RT_ELEMENTS(pReNative->Core.aHstSimdRegs); i++)
3148 {
3149 pReNative->Core.aHstSimdRegs[i].fGstRegShadows = 0;
3150 pReNative->Core.aHstSimdRegs[i].enmWhat = kIemNativeWhat_Invalid;
3151 pReNative->Core.aHstSimdRegs[i].idxVar = UINT8_MAX;
3152 pReNative->Core.aHstSimdRegs[i].enmLoaded = kIemNativeGstSimdRegLdStSz_Invalid;
3153 }
3154
3155 fRegs = IEMNATIVE_SIMD_REG_FIXED_MASK | fFixedAdditional;
3156 for (uint32_t idxReg = ASMBitFirstSetU32(fRegs) - 1; fRegs != 0; idxReg = ASMBitFirstSetU32(fRegs) - 1)
3157 {
3158 fRegs &= ~RT_BIT_32(idxReg);
3159 pReNative->Core.aHstSimdRegs[idxReg].enmWhat = kIemNativeWhat_FixedReserved;
3160 }
3161
3162#ifdef IEMNATIVE_SIMD_REG_FIXED_TMP0
3163 pReNative->Core.aHstSimdRegs[IEMNATIVE_SIMD_REG_FIXED_TMP0].enmWhat = kIemNativeWhat_FixedTmp;
3164#endif
3165
3166#endif
3167
3168 return pReNative;
3169}
3170
3171
3172/**
3173 * Allocates and initializes the native recompiler state.
3174 *
3175 * This is called the first time an EMT wants to recompile something.
3176 *
3177 * @returns Pointer to the new recompiler state.
3178 * @param pVCpu The cross context virtual CPU structure of the calling
3179 * thread.
3180 * @param pTb The TB that's about to be recompiled.
3181 * @thread EMT(pVCpu)
3182 */
3183static PIEMRECOMPILERSTATE iemNativeInit(PVMCPUCC pVCpu, PCIEMTB pTb)
3184{
3185 VMCPU_ASSERT_EMT(pVCpu);
3186
3187 PIEMRECOMPILERSTATE pReNative = (PIEMRECOMPILERSTATE)RTMemAllocZ(sizeof(*pReNative));
3188 AssertReturn(pReNative, NULL);
3189
3190 /*
3191 * Try allocate all the buffers and stuff we need.
3192 */
3193 pReNative->pInstrBuf = (PIEMNATIVEINSTR)RTMemAllocZ(_64K);
3194 pReNative->paLabels = (PIEMNATIVELABEL)RTMemAllocZ(sizeof(IEMNATIVELABEL) * _8K);
3195 pReNative->paFixups = (PIEMNATIVEFIXUP)RTMemAllocZ(sizeof(IEMNATIVEFIXUP) * _16K);
3196#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
3197 pReNative->pDbgInfo = (PIEMTBDBG)RTMemAllocZ(RT_UOFFSETOF_DYN(IEMTBDBG, aEntries[_16K]));
3198#endif
3199 if (RT_LIKELY( pReNative->pInstrBuf
3200 && pReNative->paLabels
3201 && pReNative->paFixups)
3202#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
3203 && pReNative->pDbgInfo
3204#endif
3205 )
3206 {
3207 /*
3208 * Set the buffer & array sizes on success.
3209 */
3210 pReNative->cInstrBufAlloc = _64K / sizeof(IEMNATIVEINSTR);
3211 pReNative->cLabelsAlloc = _8K;
3212 pReNative->cFixupsAlloc = _16K;
3213#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
3214 pReNative->cDbgInfoAlloc = _16K;
3215#endif
3216
3217 /* Other constant stuff: */
3218 pReNative->pVCpu = pVCpu;
3219
3220 /*
3221 * Done, just need to save it and reinit it.
3222 */
3223 pVCpu->iem.s.pNativeRecompilerStateR3 = pReNative;
3224 return iemNativeReInit(pReNative, pTb);
3225 }
3226
3227 /*
3228 * Failed. Cleanup and return.
3229 */
3230 AssertFailed();
3231 RTMemFree(pReNative->pInstrBuf);
3232 RTMemFree(pReNative->paLabels);
3233 RTMemFree(pReNative->paFixups);
3234#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
3235 RTMemFree(pReNative->pDbgInfo);
3236#endif
3237 RTMemFree(pReNative);
3238 return NULL;
3239}
3240
3241
3242/**
3243 * Creates a label
3244 *
3245 * If the label does not yet have a defined position,
3246 * call iemNativeLabelDefine() later to set it.
3247 *
3248 * @returns Label ID. Throws VBox status code on failure, so no need to check
3249 * the return value.
3250 * @param pReNative The native recompile state.
3251 * @param enmType The label type.
3252 * @param offWhere The instruction offset of the label. UINT32_MAX if the
3253 * label is not yet defined (default).
3254 * @param uData Data associated with the label. Only applicable to
3255 * certain types of labels. Default is zero.
3256 */
3257DECL_HIDDEN_THROW(uint32_t)
3258iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
3259 uint32_t offWhere /*= UINT32_MAX*/, uint16_t uData /*= 0*/)
3260{
3261 Assert(uData == 0 || enmType >= kIemNativeLabelType_FirstWithMultipleInstances);
3262
3263 /*
3264 * Locate existing label definition.
3265 *
3266 * This is only allowed for forward declarations where offWhere=UINT32_MAX
3267 * and uData is zero.
3268 */
3269 PIEMNATIVELABEL paLabels = pReNative->paLabels;
3270 uint32_t const cLabels = pReNative->cLabels;
3271 if ( pReNative->bmLabelTypes & RT_BIT_64(enmType)
3272#ifndef VBOX_STRICT
3273 && enmType < kIemNativeLabelType_FirstWithMultipleInstances
3274 && offWhere == UINT32_MAX
3275 && uData == 0
3276#endif
3277 )
3278 {
3279#ifndef VBOX_STRICT
3280 AssertStmt(enmType > kIemNativeLabelType_Invalid && enmType < kIemNativeLabelType_FirstWithMultipleInstances,
3281 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
3282 uint32_t const idxLabel = pReNative->aidxUniqueLabels[enmType];
3283 if (idxLabel < pReNative->cLabels)
3284 return idxLabel;
3285#else
3286 for (uint32_t i = 0; i < cLabels; i++)
3287 if ( paLabels[i].enmType == enmType
3288 && paLabels[i].uData == uData)
3289 {
3290 AssertStmt(uData == 0, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
3291 AssertStmt(offWhere == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
3292 AssertStmt(paLabels[i].off == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_2));
3293 AssertStmt(enmType < kIemNativeLabelType_FirstWithMultipleInstances && pReNative->aidxUniqueLabels[enmType] == i,
3294 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
3295 return i;
3296 }
3297 AssertStmt( enmType >= kIemNativeLabelType_FirstWithMultipleInstances
3298 || pReNative->aidxUniqueLabels[enmType] == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
3299#endif
3300 }
3301
3302 /*
3303 * Make sure we've got room for another label.
3304 */
3305 if (RT_LIKELY(cLabels < pReNative->cLabelsAlloc))
3306 { /* likely */ }
3307 else
3308 {
3309 uint32_t cNew = pReNative->cLabelsAlloc;
3310 AssertStmt(cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_3));
3311 AssertStmt(cLabels == cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_3));
3312 cNew *= 2;
3313 AssertStmt(cNew <= _64K, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_TOO_MANY)); /* IEMNATIVEFIXUP::idxLabel type restricts this */
3314 paLabels = (PIEMNATIVELABEL)RTMemRealloc(paLabels, cNew * sizeof(paLabels[0]));
3315 AssertStmt(paLabels, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_OUT_OF_MEMORY));
3316 pReNative->paLabels = paLabels;
3317 pReNative->cLabelsAlloc = cNew;
3318 }
3319
3320 /*
3321 * Define a new label.
3322 */
3323 paLabels[cLabels].off = offWhere;
3324 paLabels[cLabels].enmType = enmType;
3325 paLabels[cLabels].uData = uData;
3326 pReNative->cLabels = cLabels + 1;
3327
3328 Assert((unsigned)enmType < 64);
3329 pReNative->bmLabelTypes |= RT_BIT_64(enmType);
3330
3331 if (enmType < kIemNativeLabelType_FirstWithMultipleInstances)
3332 {
3333 Assert(uData == 0);
3334 pReNative->aidxUniqueLabels[enmType] = cLabels;
3335 }
3336
3337 if (offWhere != UINT32_MAX)
3338 {
3339#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
3340 iemNativeDbgInfoAddNativeOffset(pReNative, offWhere);
3341 iemNativeDbgInfoAddLabel(pReNative, enmType, uData);
3342#endif
3343 }
3344 return cLabels;
3345}
3346
3347
3348/**
3349 * Defines the location of an existing label.
3350 *
3351 * @param pReNative The native recompile state.
3352 * @param idxLabel The label to define.
3353 * @param offWhere The position.
3354 */
3355DECL_HIDDEN_THROW(void) iemNativeLabelDefine(PIEMRECOMPILERSTATE pReNative, uint32_t idxLabel, uint32_t offWhere)
3356{
3357 AssertStmt(idxLabel < pReNative->cLabels, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_4));
3358 PIEMNATIVELABEL const pLabel = &pReNative->paLabels[idxLabel];
3359 AssertStmt(pLabel->off == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_5));
3360 pLabel->off = offWhere;
3361#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
3362 iemNativeDbgInfoAddNativeOffset(pReNative, offWhere);
3363 iemNativeDbgInfoAddLabel(pReNative, (IEMNATIVELABELTYPE)pLabel->enmType, pLabel->uData);
3364#endif
3365}
3366
3367
3368/**
3369 * Looks up a label.
3370 *
3371 * @returns Label ID if found, UINT32_MAX if not.
3372 */
3373static uint32_t iemNativeLabelFind(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
3374 uint32_t offWhere = UINT32_MAX, uint16_t uData = 0) RT_NOEXCEPT
3375{
3376 Assert((unsigned)enmType < 64);
3377 if (RT_BIT_64(enmType) & pReNative->bmLabelTypes)
3378 {
3379 if (enmType < kIemNativeLabelType_FirstWithMultipleInstances)
3380 return pReNative->aidxUniqueLabels[enmType];
3381
3382 PIEMNATIVELABEL paLabels = pReNative->paLabels;
3383 uint32_t const cLabels = pReNative->cLabels;
3384 for (uint32_t i = 0; i < cLabels; i++)
3385 if ( paLabels[i].enmType == enmType
3386 && paLabels[i].uData == uData
3387 && ( paLabels[i].off == offWhere
3388 || offWhere == UINT32_MAX
3389 || paLabels[i].off == UINT32_MAX))
3390 return i;
3391 }
3392 return UINT32_MAX;
3393}
3394
3395
3396/**
3397 * Adds a fixup.
3398 *
3399 * @throws VBox status code (int) on failure.
3400 * @param pReNative The native recompile state.
3401 * @param offWhere The instruction offset of the fixup location.
3402 * @param idxLabel The target label ID for the fixup.
3403 * @param enmType The fixup type.
3404 * @param offAddend Fixup addend if applicable to the type. Default is 0.
3405 */
3406DECL_HIDDEN_THROW(void)
3407iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel,
3408 IEMNATIVEFIXUPTYPE enmType, int8_t offAddend /*= 0*/)
3409{
3410 Assert(idxLabel <= UINT16_MAX);
3411 Assert((unsigned)enmType <= UINT8_MAX);
3412#ifdef RT_ARCH_ARM64
3413 AssertStmt( enmType != kIemNativeFixupType_RelImm14At5
3414 || pReNative->paLabels[idxLabel].enmType >= kIemNativeLabelType_LastWholeTbBranch,
3415 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_SHORT_JMP_TO_TAIL_LABEL));
3416#endif
3417
3418 /*
3419 * Make sure we've room.
3420 */
3421 PIEMNATIVEFIXUP paFixups = pReNative->paFixups;
3422 uint32_t const cFixups = pReNative->cFixups;
3423 if (RT_LIKELY(cFixups < pReNative->cFixupsAlloc))
3424 { /* likely */ }
3425 else
3426 {
3427 uint32_t cNew = pReNative->cFixupsAlloc;
3428 AssertStmt(cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_IPE_1));
3429 AssertStmt(cFixups == cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_IPE_1));
3430 cNew *= 2;
3431 AssertStmt(cNew <= _128K, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_TOO_MANY));
3432 paFixups = (PIEMNATIVEFIXUP)RTMemRealloc(paFixups, cNew * sizeof(paFixups[0]));
3433 AssertStmt(paFixups, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_OUT_OF_MEMORY));
3434 pReNative->paFixups = paFixups;
3435 pReNative->cFixupsAlloc = cNew;
3436 }
3437
3438 /*
3439 * Add the fixup.
3440 */
3441 paFixups[cFixups].off = offWhere;
3442 paFixups[cFixups].idxLabel = (uint16_t)idxLabel;
3443 paFixups[cFixups].enmType = enmType;
3444 paFixups[cFixups].offAddend = offAddend;
3445 pReNative->cFixups = cFixups + 1;
3446}
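/*
 * Illustrative sketch (not part of the recompiler): the typical forward-branch
 * pattern combining iemNativeLabelCreate(), iemNativeAddFixup() and
 * iemNativeLabelDefine().  The branch emitter named below is a hypothetical
 * placeholder, and the exact fixup type/addend depends on the instruction
 * encoding; real emitters live in the emitter headers.
 */
#if 0 /* example only */
static uint32_t iemNativeExampleForwardBranch(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /* Create a label without a position yet (offWhere stays UINT32_MAX). */
    uint32_t const idxLabel = iemNativeLabelCreate(pReNative, kIemNativeLabelType_If, UINT32_MAX, pReNative->uCondSeqNo);

    /* Emit the branch and record a fixup so it gets patched once the label is defined. */
    off = iemNativeEmitJccToLabelExample(pReNative, off, idxLabel); /* hypothetical emitter */
    iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4 /* illustrative addend */);

    /* ... emit the code that is skipped when the branch is taken ... */

    /* Resolve the label at the current native offset; pending fixups are applied when the TB is finalized. */
    iemNativeLabelDefine(pReNative, idxLabel, off);
    return off;
}
#endif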
3447
3448
3449/**
3450 * Slow code path for iemNativeInstrBufEnsure.
3451 */
3452DECL_HIDDEN_THROW(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq)
3453{
3454 /* Double the buffer size till we meet the request. */
3455 uint32_t cNew = pReNative->cInstrBufAlloc;
3456 AssertStmt(cNew > 0, IEMNATIVE_DO_LONGJMP(pReNative, VERR_INTERNAL_ERROR_5)); /* impossible */
3457 do
3458 cNew *= 2;
3459 while (cNew < off + cInstrReq);
3460
3461 uint32_t const cbNew = cNew * sizeof(IEMNATIVEINSTR);
3462#ifdef RT_ARCH_ARM64
3463 uint32_t const cbMaxInstrBuf = _1M; /* Limited by the branch instruction range (18+2 bits). */
3464#else
3465 uint32_t const cbMaxInstrBuf = _2M;
3466#endif
3467 AssertStmt(cbNew <= cbMaxInstrBuf, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_INSTR_BUF_TOO_LARGE));
3468
3469 void *pvNew = RTMemRealloc(pReNative->pInstrBuf, cbNew);
3470 AssertStmt(pvNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_INSTR_BUF_OUT_OF_MEMORY));
3471
3472#ifdef VBOX_STRICT
3473 pReNative->offInstrBufChecked = off + cInstrReq;
3474#endif
3475 pReNative->cInstrBufAlloc = cNew;
3476 return pReNative->pInstrBuf = (PIEMNATIVEINSTR)pvNew;
3477}
3478
3479#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
3480
3481/**
3482 * Grows the static debug info array used during recompilation.
3483 *
3484 * @returns Pointer to the new debug info block; throws VBox status code on
3485 * failure, so no need to check the return value.
3486 */
3487DECL_NO_INLINE(static, PIEMTBDBG) iemNativeDbgInfoGrow(PIEMRECOMPILERSTATE pReNative, PIEMTBDBG pDbgInfo)
3488{
3489 uint32_t cNew = pReNative->cDbgInfoAlloc * 2;
3490 AssertStmt(cNew < _1M && cNew != 0, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_DBGINFO_IPE_1));
3491 pDbgInfo = (PIEMTBDBG)RTMemRealloc(pDbgInfo, RT_UOFFSETOF_DYN(IEMTBDBG, aEntries[cNew]));
3492 AssertStmt(pDbgInfo, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_DBGINFO_OUT_OF_MEMORY));
3493 pReNative->pDbgInfo = pDbgInfo;
3494 pReNative->cDbgInfoAlloc = cNew;
3495 return pDbgInfo;
3496}
3497
3498
3499/**
3500 * Adds a new debug info uninitialized entry, returning the pointer to it.
3501 */
3502DECL_INLINE_THROW(PIEMTBDBGENTRY) iemNativeDbgInfoAddNewEntry(PIEMRECOMPILERSTATE pReNative, PIEMTBDBG pDbgInfo)
3503{
3504 if (RT_LIKELY(pDbgInfo->cEntries < pReNative->cDbgInfoAlloc))
3505 { /* likely */ }
3506 else
3507 pDbgInfo = iemNativeDbgInfoGrow(pReNative, pDbgInfo);
3508 return &pDbgInfo->aEntries[pDbgInfo->cEntries++];
3509}
3510
3511
3512/**
3513 * Debug Info: Adds a native offset record, if necessary.
3514 */
3515DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddNativeOffset(PIEMRECOMPILERSTATE pReNative, uint32_t off)
3516{
3517 PIEMTBDBG pDbgInfo = pReNative->pDbgInfo;
3518
3519 /*
3520 * Search backwards to see if we've got a similar record already.
3521 */
3522 uint32_t idx = pDbgInfo->cEntries;
3523 uint32_t idxStop = idx > 8 ? idx - 8 : 0;
3524 while (idx-- > idxStop)
3525 if (pDbgInfo->aEntries[idx].Gen.uType == kIemTbDbgEntryType_NativeOffset)
3526 {
3527 if (pDbgInfo->aEntries[idx].NativeOffset.offNative == off)
3528 return;
3529 AssertStmt(pDbgInfo->aEntries[idx].NativeOffset.offNative < off,
3530 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_DBGINFO_IPE_2));
3531 break;
3532 }
3533
3534 /*
3535 * Add it.
3536 */
3537 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pDbgInfo);
3538 pEntry->NativeOffset.uType = kIemTbDbgEntryType_NativeOffset;
3539 pEntry->NativeOffset.offNative = off;
3540}
3541
3542
3543/**
3544 * Debug Info: Record info about a label.
3545 */
3546static void iemNativeDbgInfoAddLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, uint16_t uData)
3547{
3548 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
3549 pEntry->Label.uType = kIemTbDbgEntryType_Label;
3550 pEntry->Label.uUnused = 0;
3551 pEntry->Label.enmLabel = (uint8_t)enmType;
3552 pEntry->Label.uData = uData;
3553}
3554
3555
3556/**
3557 * Debug Info: Record info about a threaded call.
3558 */
3559static void iemNativeDbgInfoAddThreadedCall(PIEMRECOMPILERSTATE pReNative, IEMTHREADEDFUNCS enmCall, bool fRecompiled)
3560{
3561 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
3562 pEntry->ThreadedCall.uType = kIemTbDbgEntryType_ThreadedCall;
3563 pEntry->ThreadedCall.fRecompiled = fRecompiled;
3564 pEntry->ThreadedCall.uUnused = 0;
3565 pEntry->ThreadedCall.enmCall = (uint16_t)enmCall;
3566}
3567
3568
3569/**
3570 * Debug Info: Record info about a new guest instruction.
3571 */
3572static void iemNativeDbgInfoAddGuestInstruction(PIEMRECOMPILERSTATE pReNative, uint32_t fExec)
3573{
3574 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
3575 pEntry->GuestInstruction.uType = kIemTbDbgEntryType_GuestInstruction;
3576 pEntry->GuestInstruction.uUnused = 0;
3577 pEntry->GuestInstruction.fExec = fExec;
3578}
3579
3580
3581/**
3582 * Debug Info: Record info about guest register shadowing.
3583 */
3584DECL_HIDDEN_THROW(void)
3585iemNativeDbgInfoAddGuestRegShadowing(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg,
3586 uint8_t idxHstReg /*= UINT8_MAX*/, uint8_t idxHstRegPrev /*= UINT8_MAX*/)
3587{
3588 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
3589 pEntry->GuestRegShadowing.uType = kIemTbDbgEntryType_GuestRegShadowing;
3590 pEntry->GuestRegShadowing.uUnused = 0;
3591 pEntry->GuestRegShadowing.idxGstReg = enmGstReg;
3592 pEntry->GuestRegShadowing.idxHstReg = idxHstReg;
3593 pEntry->GuestRegShadowing.idxHstRegPrev = idxHstRegPrev;
3594}
3595
3596
3597# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
3598/**
3599 * Debug Info: Record info about guest SIMD register shadowing.
3600 */
3601DECL_HIDDEN_THROW(void)
3602iemNativeDbgInfoAddGuestSimdRegShadowing(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTSIMDREG enmGstSimdReg,
3603 uint8_t idxHstSimdReg /*= UINT8_MAX*/, uint8_t idxHstSimdRegPrev /*= UINT8_MAX*/)
3604{
3605 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
3606 pEntry->GuestSimdRegShadowing.uType = kIemTbDbgEntryType_GuestSimdRegShadowing;
3607 pEntry->GuestSimdRegShadowing.uUnused = 0;
3608 pEntry->GuestSimdRegShadowing.idxGstSimdReg = enmGstSimdReg;
3609 pEntry->GuestSimdRegShadowing.idxHstSimdReg = idxHstSimdReg;
3610 pEntry->GuestSimdRegShadowing.idxHstSimdRegPrev = idxHstSimdRegPrev;
3611}
3612# endif
3613
3614
3615# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
3616/**
3617 * Debug Info: Record info about delayed RIP updates.
3618 */
3619DECL_HIDDEN_THROW(void) iemNativeDbgInfoAddDelayedPcUpdate(PIEMRECOMPILERSTATE pReNative, uint32_t offPc, uint32_t cInstrSkipped)
3620{
3621 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
3622 pEntry->DelayedPcUpdate.uType = kIemTbDbgEntryType_DelayedPcUpdate;
3623 pEntry->DelayedPcUpdate.offPc = offPc;
3624 pEntry->DelayedPcUpdate.cInstrSkipped = cInstrSkipped;
3625}
3626# endif
3627
3628#endif /* IEMNATIVE_WITH_TB_DEBUG_INFO */
3629
3630
3631/*********************************************************************************************************************************
3632* Register Allocator *
3633*********************************************************************************************************************************/
3634
3635/**
3636 * Register parameter indexes (indexed by argument number).
3637 */
3638DECL_HIDDEN_CONST(uint8_t) const g_aidxIemNativeCallRegs[] =
3639{
3640 IEMNATIVE_CALL_ARG0_GREG,
3641 IEMNATIVE_CALL_ARG1_GREG,
3642 IEMNATIVE_CALL_ARG2_GREG,
3643 IEMNATIVE_CALL_ARG3_GREG,
3644#if defined(IEMNATIVE_CALL_ARG4_GREG)
3645 IEMNATIVE_CALL_ARG4_GREG,
3646# if defined(IEMNATIVE_CALL_ARG5_GREG)
3647 IEMNATIVE_CALL_ARG5_GREG,
3648# if defined(IEMNATIVE_CALL_ARG6_GREG)
3649 IEMNATIVE_CALL_ARG6_GREG,
3650# if defined(IEMNATIVE_CALL_ARG7_GREG)
3651 IEMNATIVE_CALL_ARG7_GREG,
3652# endif
3653# endif
3654# endif
3655#endif
3656};
3657AssertCompile(RT_ELEMENTS(g_aidxIemNativeCallRegs) == IEMNATIVE_CALL_ARG_GREG_COUNT);
3658
3659/**
3660 * Call register masks indexed by argument count.
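 * E.g. g_afIemNativeCallRegs[2] is the combined mask of ARG0 and ARG1.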
3661 */
3662DECL_HIDDEN_CONST(uint32_t) const g_afIemNativeCallRegs[] =
3663{
3664 0,
3665 RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG),
3666 RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG),
3667 RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG),
3668 RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG)
3669 | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG),
3670#if defined(IEMNATIVE_CALL_ARG4_GREG)
3671 RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG)
3672 | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG),
3673# if defined(IEMNATIVE_CALL_ARG5_GREG)
3674 RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG)
3675 | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG5_GREG),
3676# if defined(IEMNATIVE_CALL_ARG6_GREG)
3677 RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG)
3678 | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG5_GREG)
3679 | RT_BIT_32(IEMNATIVE_CALL_ARG6_GREG),
3680# if defined(IEMNATIVE_CALL_ARG7_GREG)
3681 RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG)
3682 | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG5_GREG)
3683 | RT_BIT_32(IEMNATIVE_CALL_ARG6_GREG) | RT_BIT_32(IEMNATIVE_CALL_ARG7_GREG),
3684# endif
3685# endif
3686# endif
3687#endif
3688};
3689
3690#ifdef IEMNATIVE_FP_OFF_STACK_ARG0
3691/**
3692 * BP offset of the stack argument slots.
3693 *
3694 * This array is indexed by \#argument - IEMNATIVE_CALL_ARG_GREG_COUNT and has
3695 * IEMNATIVE_FRAME_STACK_ARG_COUNT entries.
3696 */
3697DECL_HIDDEN_CONST(int32_t) const g_aoffIemNativeCallStackArgBpDisp[] =
3698{
3699 IEMNATIVE_FP_OFF_STACK_ARG0,
3700# ifdef IEMNATIVE_FP_OFF_STACK_ARG1
3701 IEMNATIVE_FP_OFF_STACK_ARG1,
3702# endif
3703# ifdef IEMNATIVE_FP_OFF_STACK_ARG2
3704 IEMNATIVE_FP_OFF_STACK_ARG2,
3705# endif
3706# ifdef IEMNATIVE_FP_OFF_STACK_ARG3
3707 IEMNATIVE_FP_OFF_STACK_ARG3,
3708# endif
3709};
3710AssertCompile(RT_ELEMENTS(g_aoffIemNativeCallStackArgBpDisp) == IEMNATIVE_FRAME_STACK_ARG_COUNT);
3711#endif /* IEMNATIVE_FP_OFF_STACK_ARG0 */
3712
3713/**
3714 * Info about shadowed guest register values.
3715 * @see IEMNATIVEGSTREG
3716 */
3717DECL_HIDDEN_CONST(IEMANTIVEGSTREGINFO const) g_aGstShadowInfo[] =
3718{
3719#define CPUMCTX_OFF_AND_SIZE(a_Reg) (uint32_t)RT_UOFFSETOF(VMCPU, cpum.GstCtx. a_Reg), RT_SIZEOFMEMB(VMCPU, cpum.GstCtx. a_Reg)
3720 /* [kIemNativeGstReg_GprFirst + X86_GREG_xAX] = */ { CPUMCTX_OFF_AND_SIZE(rax), "rax", },
3721 /* [kIemNativeGstReg_GprFirst + X86_GREG_xCX] = */ { CPUMCTX_OFF_AND_SIZE(rcx), "rcx", },
3722 /* [kIemNativeGstReg_GprFirst + X86_GREG_xDX] = */ { CPUMCTX_OFF_AND_SIZE(rdx), "rdx", },
3723 /* [kIemNativeGstReg_GprFirst + X86_GREG_xBX] = */ { CPUMCTX_OFF_AND_SIZE(rbx), "rbx", },
3724 /* [kIemNativeGstReg_GprFirst + X86_GREG_xSP] = */ { CPUMCTX_OFF_AND_SIZE(rsp), "rsp", },
3725 /* [kIemNativeGstReg_GprFirst + X86_GREG_xBP] = */ { CPUMCTX_OFF_AND_SIZE(rbp), "rbp", },
3726 /* [kIemNativeGstReg_GprFirst + X86_GREG_xSI] = */ { CPUMCTX_OFF_AND_SIZE(rsi), "rsi", },
3727 /* [kIemNativeGstReg_GprFirst + X86_GREG_xDI] = */ { CPUMCTX_OFF_AND_SIZE(rdi), "rdi", },
3728 /* [kIemNativeGstReg_GprFirst + X86_GREG_x8 ] = */ { CPUMCTX_OFF_AND_SIZE(r8), "r8", },
3729 /* [kIemNativeGstReg_GprFirst + X86_GREG_x9 ] = */ { CPUMCTX_OFF_AND_SIZE(r9), "r9", },
3730 /* [kIemNativeGstReg_GprFirst + X86_GREG_x10] = */ { CPUMCTX_OFF_AND_SIZE(r10), "r10", },
3731 /* [kIemNativeGstReg_GprFirst + X86_GREG_x11] = */ { CPUMCTX_OFF_AND_SIZE(r11), "r11", },
3732 /* [kIemNativeGstReg_GprFirst + X86_GREG_x12] = */ { CPUMCTX_OFF_AND_SIZE(r12), "r12", },
3733 /* [kIemNativeGstReg_GprFirst + X86_GREG_x13] = */ { CPUMCTX_OFF_AND_SIZE(r13), "r13", },
3734 /* [kIemNativeGstReg_GprFirst + X86_GREG_x14] = */ { CPUMCTX_OFF_AND_SIZE(r14), "r14", },
3735 /* [kIemNativeGstReg_GprFirst + X86_GREG_x15] = */ { CPUMCTX_OFF_AND_SIZE(r15), "r15", },
3736 /* [kIemNativeGstReg_Pc] = */ { CPUMCTX_OFF_AND_SIZE(rip), "rip", },
3737 /* [kIemNativeGstReg_Cr0] = */ { CPUMCTX_OFF_AND_SIZE(cr0), "cr0", },
3738 /* [kIemNativeGstReg_FpuFcw] = */ { CPUMCTX_OFF_AND_SIZE(XState.x87.FCW), "fcw", },
3739 /* [kIemNativeGstReg_FpuFsw] = */ { CPUMCTX_OFF_AND_SIZE(XState.x87.FSW), "fsw", },
3740 /* [kIemNativeGstReg_SegBaseFirst + 0] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[0].u64Base), "es_base", },
3741 /* [kIemNativeGstReg_SegBaseFirst + 1] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[1].u64Base), "cs_base", },
3742 /* [kIemNativeGstReg_SegBaseFirst + 2] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[2].u64Base), "ss_base", },
3743 /* [kIemNativeGstReg_SegBaseFirst + 3] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[3].u64Base), "ds_base", },
3744 /* [kIemNativeGstReg_SegBaseFirst + 4] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[4].u64Base), "fs_base", },
3745 /* [kIemNativeGstReg_SegBaseFirst + 5] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[5].u64Base), "gs_base", },
3746 /* [kIemNativeGstReg_SegAttribFirst + 0] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[0].Attr.u), "es_attrib", },
3747 /* [kIemNativeGstReg_SegAttribFirst + 1] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[1].Attr.u), "cs_attrib", },
3748 /* [kIemNativeGstReg_SegAttribFirst + 2] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[2].Attr.u), "ss_attrib", },
3749 /* [kIemNativeGstReg_SegAttribFirst + 3] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[3].Attr.u), "ds_attrib", },
3750 /* [kIemNativeGstReg_SegAttribFirst + 4] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[4].Attr.u), "fs_attrib", },
3751 /* [kIemNativeGstReg_SegAttribFirst + 5] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[5].Attr.u), "gs_attrib", },
3752 /* [kIemNativeGstReg_SegLimitFirst + 0] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[0].u32Limit), "es_limit", },
3753 /* [kIemNativeGstReg_SegLimitFirst + 1] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[1].u32Limit), "cs_limit", },
3754 /* [kIemNativeGstReg_SegLimitFirst + 2] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[2].u32Limit), "ss_limit", },
3755 /* [kIemNativeGstReg_SegLimitFirst + 3] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[3].u32Limit), "ds_limit", },
3756 /* [kIemNativeGstReg_SegLimitFirst + 4] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[4].u32Limit), "fs_limit", },
3757 /* [kIemNativeGstReg_SegLimitFirst + 5] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[5].u32Limit), "gs_limit", },
3758 /* [kIemNativeGstReg_SegSelFirst + 0] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[0].Sel), "es", },
3759 /* [kIemNativeGstReg_SegSelFirst + 1] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[1].Sel), "cs", },
3760 /* [kIemNativeGstReg_SegSelFirst + 2] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[2].Sel), "ss", },
3761 /* [kIemNativeGstReg_SegSelFirst + 3] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[3].Sel), "ds", },
3762 /* [kIemNativeGstReg_SegSelFirst + 4] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[4].Sel), "fs", },
3763 /* [kIemNativeGstReg_SegSelFirst + 5] = */ { CPUMCTX_OFF_AND_SIZE(aSRegs[5].Sel), "gs", },
3764 /* [kIemNativeGstReg_Cr4] = */ { CPUMCTX_OFF_AND_SIZE(cr4), "cr4", },
3765 /* [kIemNativeGstReg_Xcr0] = */ { CPUMCTX_OFF_AND_SIZE(aXcr[0]), "xcr0", },
3766 /* [kIemNativeGstReg_MxCsr] = */ { CPUMCTX_OFF_AND_SIZE(XState.x87.MXCSR), "mxcsr", },
3767 /* [kIemNativeGstReg_EFlags] = */ { CPUMCTX_OFF_AND_SIZE(eflags), "eflags", },
3768#undef CPUMCTX_OFF_AND_SIZE
3769};
3770AssertCompile(RT_ELEMENTS(g_aGstShadowInfo) == kIemNativeGstReg_End);
3771
3772
3773/** Host CPU general purpose register names. */
3774DECL_HIDDEN_CONST(const char * const) g_apszIemNativeHstRegNames[] =
3775{
3776#ifdef RT_ARCH_AMD64
3777 "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
3778#elif defined(RT_ARCH_ARM64)
3779 "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
3780 "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "bp", "lr", "sp/xzr",
3781#else
3782# error "port me"
3783#endif
3784};
3785
3786
3787#if 0 /* unused */
3788/**
3789 * Tries to locate a suitable register in the given register mask.
3790 *
3791 * This ASSUMES the caller has done the minimal/optimal allocation checks and
3792 * failed.
3793 *
3794 * @returns Host register number on success, returns UINT8_MAX on failure.
3795 */
3796static uint8_t iemNativeRegTryAllocFree(PIEMRECOMPILERSTATE pReNative, uint32_t fRegMask)
3797{
3798 Assert(!(fRegMask & ~IEMNATIVE_HST_GREG_MASK));
3799 uint32_t fRegs = ~pReNative->Core.bmHstRegs & fRegMask;
3800 if (fRegs)
3801 {
3802 /** @todo pick better here: */
3803 unsigned const idxReg = ASMBitFirstSetU32(fRegs) - 1;
3804
3805 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0);
3806 Assert( (pReNative->Core.aHstRegs[idxReg].fGstRegShadows & pReNative->Core.bmGstRegShadows)
3807 == pReNative->Core.aHstRegs[idxReg].fGstRegShadows);
3808 Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg));
3809
3810 pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
3811 pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
3812 pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
3813 return idxReg;
3814 }
3815 return UINT8_MAX;
3816}
3817#endif /* unused */
3818
3819
3820/**
3821 * Locate a register, possibly freeing one up.
3822 *
3823 * This ASSUMES the caller has done the minimal/optimal allocation checks and
3824 * failed.
3825 *
3826 * @returns Host register number on success. Returns UINT8_MAX if no register was
3827 * found; the caller is expected to deal with this and raise an
3828 * allocation-type specific status code (if desired).
3829 *
3830 * @throws VBox status code if we run into trouble spilling a variable or
3831 * recording debug info. Does NOT throw anything if we're out of
3832 * registers, though.
3833 */
3834static uint8_t iemNativeRegAllocFindFree(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile,
3835 uint32_t fRegMask = IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK)
3836{
3837 STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFree);
3838 Assert(!(fRegMask & ~IEMNATIVE_HST_GREG_MASK));
3839 Assert(!(fRegMask & IEMNATIVE_REG_FIXED_MASK));
3840
3841 /*
3842 * Try a freed register that's shadowing a guest register.
3843 */
3844 uint32_t fRegs = ~pReNative->Core.bmHstRegs & fRegMask;
3845 if (fRegs)
3846 {
3847 STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeNoVar);
3848
3849#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
3850 /*
3851 * When we have liveness information, we use it to kick out all shadowed
3852 * guest registers that will not be needed any more in this TB. If we're
3853 * lucky, this may prevent us from ending up here again.
3854 *
3855 * Note! We must consider the previous entry here so we don't free
3856 * anything that the current threaded function requires (current
3857 * entry is produced by the next threaded function).
3858 */
3859 uint32_t const idxCurCall = pReNative->idxCurCall;
3860 if (idxCurCall > 0)
3861 {
3862 PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[idxCurCall - 1];
3863
3864# ifndef IEMLIVENESS_EXTENDED_LAYOUT
3865 /* Construct a mask of the guest registers in the UNUSED and XCPT_OR_CALL state. */
3866 AssertCompile(IEMLIVENESS_STATE_UNUSED == 1 && IEMLIVENESS_STATE_XCPT_OR_CALL == 2);
3867 uint64_t fToFreeMask = pLivenessEntry->Bit0.bm64 ^ pLivenessEntry->Bit1.bm64; /* mask of regs in either the UNUSED or XCPT_OR_CALL state */
3868# else
3869 /* Construct a mask of the registers not in the read or write state.
3870 Note! We could skip writes, if they aren't from us, as this is just
3871 a hack to prevent trashing registers that have just been written
3872 or will be written when we retire the current instruction. */
3873 uint64_t fToFreeMask = ~pLivenessEntry->aBits[IEMLIVENESS_BIT_READ].bm64
3874 & ~pLivenessEntry->aBits[IEMLIVENESS_BIT_WRITE].bm64
3875 & IEMLIVENESSBIT_MASK;
3876# endif
3877 /* Merge EFLAGS. */
3878 uint64_t fTmp = fToFreeMask & (fToFreeMask >> 3); /* AF2,PF2,CF2,Other2 = AF,PF,CF,Other & OF,SF,ZF,AF */
3879 fTmp &= fTmp >> 2; /* CF3,Other3 = AF2,PF2 & CF2,Other2 */
3880 fTmp &= fTmp >> 1; /* Other4 = CF3 & Other3 */
3881 fToFreeMask &= RT_BIT_64(kIemNativeGstReg_EFlags) - 1;
3882 fToFreeMask |= fTmp & RT_BIT_64(kIemNativeGstReg_EFlags);
3883
3884 /* If it matches any shadowed registers. */
3885 if (pReNative->Core.bmGstRegShadows & fToFreeMask)
3886 {
3887 STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed);
3888 iemNativeRegFlushGuestShadows(pReNative, fToFreeMask);
3889 Assert(fRegs == (~pReNative->Core.bmHstRegs & fRegMask)); /* this shall not change. */
3890
3891 /* See if we've got any unshadowed registers we can return now. */
3892 uint32_t const fUnshadowedRegs = fRegs & ~pReNative->Core.bmHstRegsWithGstShadow;
3893 if (fUnshadowedRegs)
3894 {
3895 STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped);
3896 return (fPreferVolatile
3897 ? ASMBitFirstSetU32(fUnshadowedRegs)
3898 : ASMBitLastSetU32( fUnshadowedRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
3899 ? fUnshadowedRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fUnshadowedRegs))
3900 - 1;
3901 }
3902 }
3903 }
3904#endif /* IEMNATIVE_WITH_LIVENESS_ANALYSIS */
3905
3906 unsigned const idxReg = (fPreferVolatile
3907 ? ASMBitFirstSetU32(fRegs)
3908 : ASMBitLastSetU32( fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
3909 ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs))
3910 - 1;
3911
3912 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0);
3913 Assert( (pReNative->Core.aHstRegs[idxReg].fGstRegShadows & pReNative->Core.bmGstRegShadows)
3914 == pReNative->Core.aHstRegs[idxReg].fGstRegShadows);
3915 Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg));
3916
3917 pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
3918 pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
3919 pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
3920 return idxReg;
3921 }
3922
3923 /*
3924 * Try free up a variable that's in a register.
3925 *
3926 * We do two rounds here, first evacuating variables that don't need to be
3927 * saved on the stack, then in the second round moving things to the stack.
3928 */
3929 STAM_REL_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeVar);
3930 for (uint32_t iLoop = 0; iLoop < 2; iLoop++)
3931 {
3932 uint32_t fVars = pReNative->Core.bmVars;
3933 while (fVars)
3934 {
3935 uint32_t const idxVar = ASMBitFirstSetU32(fVars) - 1;
3936 uint8_t const idxReg = pReNative->Core.aVars[idxVar].idxReg;
3937 if ( idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs)
3938 && (RT_BIT_32(idxReg) & fRegMask)
3939 && ( iLoop == 0
3940 ? pReNative->Core.aVars[idxVar].enmKind != kIemNativeVarKind_Stack
3941 : pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Stack)
3942 && !pReNative->Core.aVars[idxVar].fRegAcquired)
3943 {
3944 Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxReg));
3945 Assert( (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxReg].fGstRegShadows)
3946 == pReNative->Core.aHstRegs[idxReg].fGstRegShadows);
3947 Assert(pReNative->Core.bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
3948 Assert( RT_BOOL(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg))
3949 == RT_BOOL(pReNative->Core.aHstRegs[idxReg].fGstRegShadows));
3950
3951 if (pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Stack)
3952 {
3953 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, IEMNATIVE_VAR_IDX_PACK(idxVar));
3954 *poff = iemNativeEmitStoreGprByBp(pReNative, *poff, iemNativeStackCalcBpDisp(idxStackSlot), idxReg);
3955 }
3956
3957 pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
3958 pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxReg);
3959
3960 pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
3961 pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
3962 pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
3963 return idxReg;
3964 }
3965 fVars &= ~RT_BIT_32(idxVar);
3966 }
3967 }
3968
3969 return UINT8_MAX;
3970}
3971
3972
3973/**
3974 * Reassigns a variable to a different register specified by the caller.
3975 *
3976 * @returns The new code buffer position.
3977 * @param pReNative The native recompile state.
3978 * @param off The current code buffer position.
3979 * @param idxVar The variable index.
3980 * @param idxRegOld The old host register number.
3981 * @param idxRegNew The new host register number.
3982 * @param pszCaller The caller for logging.
3983 */
3984static uint32_t iemNativeRegMoveVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar,
3985 uint8_t idxRegOld, uint8_t idxRegNew, const char *pszCaller)
3986{
3987 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
3988 Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxRegOld);
3989#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
3990 Assert(!pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg);
3991#endif
3992 RT_NOREF(pszCaller);
3993
3994 iemNativeRegClearGstRegShadowing(pReNative, idxRegNew, off);
3995
3996 uint64_t fGstRegShadows = pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
3997 Log12(("%s: moving idxVar=%#x from %s to %s (fGstRegShadows=%RX64)\n",
3998 pszCaller, idxVar, g_apszIemNativeHstRegNames[idxRegOld], g_apszIemNativeHstRegNames[idxRegNew], fGstRegShadows));
3999 off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegNew, idxRegOld);
4000
4001 pReNative->Core.aHstRegs[idxRegNew].fGstRegShadows = fGstRegShadows;
4002 pReNative->Core.aHstRegs[idxRegNew].enmWhat = kIemNativeWhat_Var;
4003 pReNative->Core.aHstRegs[idxRegNew].idxVar = idxVar;
4004 if (fGstRegShadows)
4005 {
4006 pReNative->Core.bmHstRegsWithGstShadow = (pReNative->Core.bmHstRegsWithGstShadow & ~RT_BIT_32(idxRegOld))
4007 | RT_BIT_32(idxRegNew);
4008 while (fGstRegShadows)
4009 {
4010 unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
4011 fGstRegShadows &= ~RT_BIT_64(idxGstReg);
4012
4013 Assert(pReNative->Core.aidxGstRegShadows[idxGstReg] == idxRegOld);
4014 pReNative->Core.aidxGstRegShadows[idxGstReg] = idxRegNew;
4015 }
4016 }
4017
4018 pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg = (uint8_t)idxRegNew;
4019 pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows = 0;
4020 pReNative->Core.bmHstRegs = RT_BIT_32(idxRegNew) | (pReNative->Core.bmHstRegs & ~RT_BIT_32(idxRegOld));
4021 return off;
4022}
4023
4024
4025/**
4026 * Moves a variable to a different register or spills it onto the stack.
4027 *
4028 * This must be a stack variable (kIemNativeVarKind_Stack) because the other
4029 * kinds can easily be recreated if needed later.
4030 *
4031 * @returns The new code buffer position.
4032 * @param pReNative The native recompile state.
4033 * @param off The current code buffer position.
4034 * @param idxVar The variable index.
4035 * @param fForbiddenRegs Mask of the forbidden registers. Defaults to
4036 * call-volatile registers.
4037 */
4038DECL_HIDDEN_THROW(uint32_t) iemNativeRegMoveOrSpillStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar,
4039 uint32_t fForbiddenRegs /*= IEMNATIVE_CALL_VOLATILE_GREG_MASK*/)
4040{
4041 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
4042 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
4043 Assert(pVar->enmKind == kIemNativeVarKind_Stack);
4044 Assert(!pVar->fRegAcquired);
4045
4046 uint8_t const idxRegOld = pVar->idxReg;
4047 Assert(idxRegOld < RT_ELEMENTS(pReNative->Core.aHstRegs));
4048 Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxRegOld));
4049 Assert(pReNative->Core.aHstRegs[idxRegOld].enmWhat == kIemNativeWhat_Var);
4050 Assert( (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows)
4051 == pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows);
4052 Assert(pReNative->Core.bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
4053 Assert( RT_BOOL(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxRegOld))
4054 == RT_BOOL(pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows));
4055
4056
4057 /** @todo Add statistics on this.*/
4058 /** @todo Implement basic variable liveness analysis (python) so variables
4059 * can be freed immediately once no longer used. Without it we risk
4060 * trashing registers and stack for dead variables.
4061 * Update: This is mostly done. (Not IEMNATIVE_WITH_LIVENESS_ANALYSIS.) */
4062
4063 /*
4064 * First try move it to a different register, as that's cheaper.
4065 */
4066 fForbiddenRegs |= RT_BIT_32(idxRegOld);
4067 fForbiddenRegs |= IEMNATIVE_REG_FIXED_MASK;
4068 uint32_t fRegs = ~pReNative->Core.bmHstRegs & ~fForbiddenRegs;
4069 if (fRegs)
4070 {
4071 /* Avoid using shadow registers, if possible. */
4072 if (fRegs & ~pReNative->Core.bmHstRegsWithGstShadow)
4073 fRegs &= ~pReNative->Core.bmHstRegsWithGstShadow;
4074 unsigned const idxRegNew = ASMBitFirstSetU32(fRegs) - 1;
4075 return iemNativeRegMoveVar(pReNative, off, idxVar, idxRegOld, idxRegNew, "iemNativeRegMoveOrSpillStackVar");
4076 }
4077
4078 /*
4079 * Otherwise we must spill the register onto the stack.
4080 */
4081 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
4082 Log12(("iemNativeRegMoveOrSpillStackVar: spilling idxVar=%#x/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
4083 idxVar, idxRegOld, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
4084 off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
4085
4086 pVar->idxReg = UINT8_MAX;
4087 pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
4088 pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxRegOld);
4089 pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
4090 pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows = 0;
4091 return off;
4092}
4093
4094
4095/**
4096 * Allocates a temporary host general purpose register.
4097 *
4098 * This may emit code to save register content onto the stack in order to free
4099 * up a register.
4100 *
4101 * @returns The host register number; throws VBox status code on failure,
4102 * so no need to check the return value.
4103 * @param pReNative The native recompile state.
4104 * @param poff Pointer to the variable with the code buffer position.
4105 * This will be updated if we need to move a variable from
4106 * register to stack in order to satisfy the request.
4107 * @param fPreferVolatile Whether to prefer volatile over non-volatile
4108 * registers (@c true, default) or the other way around
4109 * (@c false, for iemNativeRegAllocTmpForGuestReg()).
4110 */
4111DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile /*= true*/)
4112{
4113 /*
4114 * Try find a completely unused register, preferably a call-volatile one.
4115 */
4116 uint8_t idxReg;
4117 uint32_t fRegs = ~pReNative->Core.bmHstRegs
4118 & ~pReNative->Core.bmHstRegsWithGstShadow
4119 & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK);
4120 if (fRegs)
4121 {
4122 if (fPreferVolatile)
4123 idxReg = (uint8_t)ASMBitFirstSetU32( fRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK
4124 ? fRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1;
4125 else
4126 idxReg = (uint8_t)ASMBitFirstSetU32( fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
4127 ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1;
4128 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
4129 Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
4130 Log12(("iemNativeRegAllocTmp: %s\n", g_apszIemNativeHstRegNames[idxReg]));
4131 }
4132 else
4133 {
4134 idxReg = iemNativeRegAllocFindFree(pReNative, poff, fPreferVolatile);
4135 AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP));
4136 Log12(("iemNativeRegAllocTmp: %s (slow)\n", g_apszIemNativeHstRegNames[idxReg]));
4137 }
4138 return iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Tmp);
4139}
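/*
 * Minimal usage sketch (illustration only, fragment assumed to sit inside an
 * emitter where pReNative and off are in scope): grab a scratch GPR, use it for
 * a few emitted instructions and hand it back.  Assumes the iemNativeRegFreeTmp()
 * release helper declared elsewhere in this file.
 */
#if 0 /* example only */
    uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
    off = iemNativeEmitLoadGprImm64(pReNative, off, idxTmpReg, UINT64_C(0xdeadbeef)); /* arbitrary example value */
    /* ... emit instructions using idxTmpReg ... */
    iemNativeRegFreeTmp(pReNative, idxTmpReg);
#endif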
4140
4141
4142/**
4143 * Alternative version of iemNativeRegAllocTmp that takes mask with acceptable
4144 * registers.
4145 *
4146 * @returns The host register number; throws VBox status code on failure,
4147 * so no need to check the return value.
4148 * @param pReNative The native recompile state.
4149 * @param poff Pointer to the variable with the code buffer position.
4150 * This will be updated if we need to move a variable from
4151 * register to stack in order to satisfy the request.
4152 * @param fRegMask Mask of acceptable registers.
4153 * @param fPreferVolatile Whether to prefer volatile over non-volatile
4154 * registers (@c true, default) or the other way around
4155 * (@c false, for iemNativeRegAllocTmpForGuestReg()).
4156 */
4157DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpEx(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint32_t fRegMask,
4158 bool fPreferVolatile /*= true*/)
4159{
4160 Assert(!(fRegMask & ~IEMNATIVE_HST_GREG_MASK));
4161 Assert(!(fRegMask & IEMNATIVE_REG_FIXED_MASK));
4162
4163 /*
4164 * Try find a completely unused register, preferably a call-volatile one.
4165 */
4166 uint8_t idxReg;
4167 uint32_t fRegs = ~pReNative->Core.bmHstRegs
4168 & ~pReNative->Core.bmHstRegsWithGstShadow
4169 & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK)
4170 & fRegMask;
4171 if (fRegs)
4172 {
4173 if (fPreferVolatile)
4174 idxReg = (uint8_t)ASMBitFirstSetU32( fRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK
4175 ? fRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1;
4176 else
4177 idxReg = (uint8_t)ASMBitFirstSetU32( fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
4178 ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1;
4179 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
4180 Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
4181 Log12(("iemNativeRegAllocTmpEx: %s\n", g_apszIemNativeHstRegNames[idxReg]));
4182 }
4183 else
4184 {
4185 idxReg = iemNativeRegAllocFindFree(pReNative, poff, fPreferVolatile, fRegMask);
4186 AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP));
4187 Log12(("iemNativeRegAllocTmpEx: %s (slow)\n", g_apszIemNativeHstRegNames[idxReg]));
4188 }
4189 return iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Tmp);
4190}
4191
4192
4193/**
4194 * Allocates a temporary register for loading an immediate value into.
4195 *
4196 * This will emit code to load the immediate, unless there happens to be an
4197 * unused register with the value already loaded.
4198 *
4199 * The caller will not modify the returned register, it must be considered
4200 * read-only. Free using iemNativeRegFreeTmpImm.
4201 *
4202 * @returns The host register number; throws VBox status code on failure, so no
4203 * need to check the return value.
4204 * @param pReNative The native recompile state.
4205 * @param poff Pointer to the variable with the code buffer position.
4206 * @param uImm The immediate value that the register must hold upon
4207 * return.
4208 * @param fPreferVolatile Whether to prefer volatile over non-volatile
4209 * registers (@c true, default) or the other way around
4210 * (@c false).
4211 *
4212 * @note Reusing immediate values has not been implemented yet.
4213 */
4214DECL_HIDDEN_THROW(uint8_t)
4215iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm, bool fPreferVolatile /*= true*/)
4216{
4217 uint8_t const idxReg = iemNativeRegAllocTmp(pReNative, poff, fPreferVolatile);
4218 *poff = iemNativeEmitLoadGprImm64(pReNative, *poff, idxReg, uImm);
4219 return idxReg;
4220}
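/*
 * Usage sketch (illustration only, same fragment convention as above): load a
 * constant into a read-only temporary, e.g. for use as a comparison operand,
 * and release it with the iemNativeRegFreeTmpImm() helper mentioned above.
 */
#if 0 /* example only */
    uint8_t const idxImmReg = iemNativeRegAllocTmpImm(pReNative, &off, UINT64_C(0x1000)); /* arbitrary constant */
    /* ... emit instructions that only read idxImmReg ... */
    iemNativeRegFreeTmpImm(pReNative, idxImmReg);
#endif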
4221
4222
4223/**
4224 * Allocates a temporary host general purpose register for keeping a guest
4225 * register value.
4226 *
4227 * Since we may already have a register holding the guest register value,
4228 * code will be emitted to do the loading if that's not the case. Code may also
4229 * be emitted if we have to free up a register to satisfy the request.
4230 *
4231 * @returns The host register number; throws VBox status code on failure, so no
4232 * need to check the return value.
4233 * @param pReNative The native recompile state.
4234 * @param poff Pointer to the variable with the code buffer
4235 * position. This will be updated if we need to move a
4236 * variable from register to stack in order to satisfy
4237 * the request.
4238 * @param enmGstReg The guest register that is to be updated.
4239 * @param enmIntendedUse How the caller will be using the host register.
4240 * @param fNoVolatileRegs Set if no volatile register allowed, clear if any
4241 * register is okay (default). The ASSUMPTION here is
4242 * that the caller has already flushed all volatile
4243 * registers, so this is only applied if we allocate a
4244 * new register.
4245 * @param fSkipLivenessAssert Hack for liveness input validation of EFLAGS.
4246 * @sa iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
4247 */
4248DECL_HIDDEN_THROW(uint8_t)
4249iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg,
4250 IEMNATIVEGSTREGUSE enmIntendedUse /*= kIemNativeGstRegUse_ReadOnly*/,
4251 bool fNoVolatileRegs /*= false*/, bool fSkipLivenessAssert /*= false*/)
4252{
4253 Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
4254#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
4255 AssertMsg( fSkipLivenessAssert
4256 || pReNative->idxCurCall == 0
4257 || enmGstReg == kIemNativeGstReg_Pc
4258 || (enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
4259 ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
4260 : enmIntendedUse == kIemNativeGstRegUse_ForUpdate
4261 ? IEMLIVENESS_STATE_IS_MODIFY_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
4262 : IEMLIVENESS_STATE_IS_INPUT_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)) ),
4263 ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
4264#endif
4265 RT_NOREF(fSkipLivenessAssert);
4266#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
4267 static const char * const s_pszIntendedUse[] = { "fetch", "update", "full write", "destructive calc" };
4268#endif
4269 uint32_t const fRegMask = !fNoVolatileRegs
4270 ? IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK
4271 : IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK;
4272
4273 /*
4274 * First check if the guest register value is already in a host register.
4275 */
4276 if (pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg))
4277 {
4278 uint8_t idxReg = pReNative->Core.aidxGstRegShadows[enmGstReg];
4279 Assert(idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
4280 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows & RT_BIT_64(enmGstReg));
4281 Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg));
4282
4283 /* It's not supposed to be allocated... */
4284 if (!(pReNative->Core.bmHstRegs & RT_BIT_32(idxReg)))
4285 {
4286 /*
4287 * If the register will trash the guest shadow copy, try find a
4288 * completely unused register we can use instead. If that fails,
4289 * we need to disassociate the host reg from the guest reg.
4290 */
4291 /** @todo would be nice to know if preserving the register is in any way helpful. */
4292 /* If the purpose is calculations, try duplicate the register value as
4293 we'll be clobbering the shadow. */
4294 if ( enmIntendedUse == kIemNativeGstRegUse_Calculation
4295 && ( ~pReNative->Core.bmHstRegs
4296 & ~pReNative->Core.bmHstRegsWithGstShadow
4297 & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK)))
4298 {
4299 uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask);
4300
4301 *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);
4302
4303 Log12(("iemNativeRegAllocTmpForGuestReg: Duplicated %s for guest %s into %s for destructive calc\n",
4304 g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName,
4305 g_apszIemNativeHstRegNames[idxRegNew]));
4306 idxReg = idxRegNew;
4307 }
4308 /* If the current register matches the restrictions, go ahead and allocate
4309 it for the caller. */
4310 else if (fRegMask & RT_BIT_32(idxReg))
4311 {
4312 pReNative->Core.bmHstRegs |= RT_BIT_32(idxReg);
4313 pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Tmp;
4314 pReNative->Core.aHstRegs[idxReg].idxVar = UINT8_MAX;
4315 if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
4316 Log12(("iemNativeRegAllocTmpForGuestReg: Reusing %s for guest %s %s\n",
4317 g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[enmIntendedUse]));
4318 else
4319 {
4320 iemNativeRegClearGstRegShadowing(pReNative, idxReg, *poff);
4321 Log12(("iemNativeRegAllocTmpForGuestReg: Grabbing %s for guest %s - destructive calc\n",
4322 g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName));
4323 }
4324 }
4325 /* Otherwise, allocate a register that satisfies the caller and transfer
4326 the shadowing if compatible with the intended use. (This basically
4327 means the caller wants a non-volatile register (RSP push/pop scenario).) */
4328 else
4329 {
4330 Assert(fNoVolatileRegs);
4331 uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask & ~RT_BIT_32(idxReg),
4332 !fNoVolatileRegs
4333 && enmIntendedUse == kIemNativeGstRegUse_Calculation);
4334 *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);
4335 if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
4336 {
4337 iemNativeRegTransferGstRegShadowing(pReNative, idxReg, idxRegNew, enmGstReg, *poff);
4338 Log12(("iemNativeRegAllocTmpForGuestReg: Transferring %s to %s for guest %s %s\n",
4339 g_apszIemNativeHstRegNames[idxReg], g_apszIemNativeHstRegNames[idxRegNew],
4340 g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[enmIntendedUse]));
4341 }
4342 else
4343 Log12(("iemNativeRegAllocTmpForGuestReg: Duplicated %s for guest %s into %s for destructive calc\n",
4344 g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName,
4345 g_apszIemNativeHstRegNames[idxRegNew]));
4346 idxReg = idxRegNew;
4347 }
4348 }
4349 else
4350 {
4351 /*
4352 * Oops. Shadowed guest register already allocated!
4353 *
4354 * Allocate a new register, copy the value and, if updating, the
4355 * guest shadow copy assignment to the new register.
4356 */
4357 AssertMsg( enmIntendedUse != kIemNativeGstRegUse_ForUpdate
4358 && enmIntendedUse != kIemNativeGstRegUse_ForFullWrite,
4359 ("This shouldn't happen: idxReg=%d enmGstReg=%d enmIntendedUse=%s\n",
4360 idxReg, enmGstReg, s_pszIntendedUse[enmIntendedUse]));
4361
4362 /** @todo share register for readonly access. */
4363 uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask,
4364 enmIntendedUse == kIemNativeGstRegUse_Calculation);
4365
4366 if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
4367 *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);
4368
4369 if ( enmIntendedUse != kIemNativeGstRegUse_ForUpdate
4370 && enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
4371 Log12(("iemNativeRegAllocTmpForGuestReg: Duplicated %s for guest %s into %s for %s\n",
4372 g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName,
4373 g_apszIemNativeHstRegNames[idxRegNew], s_pszIntendedUse[enmIntendedUse]));
4374 else
4375 {
4376 iemNativeRegTransferGstRegShadowing(pReNative, idxReg, idxRegNew, enmGstReg, *poff);
4377 Log12(("iemNativeRegAllocTmpForGuestReg: Moved %s for guest %s into %s for %s\n",
4378 g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName,
4379 g_apszIemNativeHstRegNames[idxRegNew], s_pszIntendedUse[enmIntendedUse]));
4380 }
4381 idxReg = idxRegNew;
4382 }
4383 Assert(RT_BIT_32(idxReg) & fRegMask); /* See assumption in fNoVolatileRegs docs. */
4384
4385#ifdef VBOX_STRICT
4386 /* Strict builds: Check that the value is correct. */
4387 *poff = iemNativeEmitGuestRegValueCheck(pReNative, *poff, idxReg, enmGstReg);
4388#endif
4389
4390 return idxReg;
4391 }
4392
4393 /*
4394 * Allocate a new register, load it with the guest value and designate it as a copy of the guest register.
4395 */
4396 uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask, enmIntendedUse == kIemNativeGstRegUse_Calculation);
4397
4398 if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
4399 *poff = iemNativeEmitLoadGprWithGstShadowReg(pReNative, *poff, idxRegNew, enmGstReg);
4400
4401 if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
4402 iemNativeRegMarkAsGstRegShadow(pReNative, idxRegNew, enmGstReg, *poff);
4403 Log12(("iemNativeRegAllocTmpForGuestReg: Allocated %s for guest %s %s\n",
4404 g_apszIemNativeHstRegNames[idxRegNew], g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[enmIntendedUse]));
4405
4406 return idxRegNew;
4407}
4408
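/* Example (illustrative sketch only, not from the original source): a typical caller of
   iemNativeRegAllocTmpForGuestReg fetches a guest register for updating, emits code using
   the returned host register and then releases the temporary again.  The local 'off' and
   the surrounding call site are hypothetical. */
#if 0
    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
                                                             kIemNativeGstRegUse_ForUpdate);
    /* ... emit instructions that modify the guest PC value held in idxPcReg ... */
    iemNativeRegFreeTmp(pReNative, idxPcReg); /* Releases the temp; the shadow association is kept. */
#endif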
4409
4410/**
4411 * Allocates a temporary host general purpose register that already holds the
4412 * given guest register value.
4413 *
 4414  * The use case for this function is code where the shadowing state cannot be
 4415  * modified due to branching and such.  This will fail if we don't have a
4416 * current shadow copy handy or if it's incompatible. The only code that will
4417 * be emitted here is value checking code in strict builds.
4418 *
4419 * The intended use can only be readonly!
4420 *
4421 * @returns The host register number, UINT8_MAX if not present.
4422 * @param pReNative The native recompile state.
4423 * @param poff Pointer to the instruction buffer offset.
4424 * Will be updated in strict builds if a register is
4425 * found.
 4426  * @param   enmGstReg       The guest register that is to be updated.
4427 * @note In strict builds, this may throw instruction buffer growth failures.
4428 * Non-strict builds will not throw anything.
4429 * @sa iemNativeRegAllocTmpForGuestReg
4430 */
4431DECL_HIDDEN_THROW(uint8_t)
4432iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
4433{
4434 Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
4435#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
4436 AssertMsg( pReNative->idxCurCall == 0
4437 || IEMLIVENESS_STATE_IS_INPUT_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
4438 || enmGstReg == kIemNativeGstReg_Pc,
4439 ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
4440#endif
4441
4442 /*
4443 * First check if the guest register value is already in a host register.
4444 */
4445 if (pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg))
4446 {
4447 uint8_t idxReg = pReNative->Core.aidxGstRegShadows[enmGstReg];
4448 Assert(idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
4449 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows & RT_BIT_64(enmGstReg));
4450 Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg));
4451
4452 if (!(pReNative->Core.bmHstRegs & RT_BIT_32(idxReg)))
4453 {
4454 /*
4455 * We only do readonly use here, so easy compared to the other
4456 * variant of this code.
4457 */
4458 pReNative->Core.bmHstRegs |= RT_BIT_32(idxReg);
4459 pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Tmp;
4460 pReNative->Core.aHstRegs[idxReg].idxVar = UINT8_MAX;
4461 Log12(("iemNativeRegAllocTmpForGuestRegIfAlreadyPresent: Reusing %s for guest %s readonly\n",
4462 g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName));
4463
4464#ifdef VBOX_STRICT
4465 /* Strict builds: Check that the value is correct. */
4466 *poff = iemNativeEmitGuestRegValueCheck(pReNative, *poff, idxReg, enmGstReg);
4467#else
4468 RT_NOREF(poff);
4469#endif
4470 return idxReg;
4471 }
4472 }
4473
4474 return UINT8_MAX;
4475}
4476
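/* Example (illustrative sketch only, hypothetical call site): probing for an existing
   shadow copy of the guest PC and using it read-only, e.g. on a code path where the
   shadowing state must not be changed because of branching. */
#if 0
    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(pReNative, &off, kIemNativeGstReg_Pc);
    if (idxPcReg != UINT8_MAX)
    {
        /* ... emit read-only uses of idxPcReg ... */
        iemNativeRegFreeTmp(pReNative, idxPcReg);
    }
    /* else: no shadow copy handy, fall back to some other strategy. */
#endif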
4477
4478/**
4479 * Allocates argument registers for a function call.
4480 *
4481 * @returns New code buffer offset on success; throws VBox status code on failure, so no
4482 * need to check the return value.
4483 * @param pReNative The native recompile state.
4484 * @param off The current code buffer offset.
4485 * @param cArgs The number of arguments the function call takes.
4486 */
4487DECL_HIDDEN_THROW(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs)
4488{
4489 AssertStmt(cArgs <= IEMNATIVE_CALL_ARG_GREG_COUNT + IEMNATIVE_FRAME_STACK_ARG_COUNT,
4490 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_4));
4491 Assert(RT_ELEMENTS(g_aidxIemNativeCallRegs) == IEMNATIVE_CALL_ARG_GREG_COUNT);
4492 Assert(RT_ELEMENTS(g_afIemNativeCallRegs) == IEMNATIVE_CALL_ARG_GREG_COUNT);
4493
4494 if (cArgs > RT_ELEMENTS(g_aidxIemNativeCallRegs))
4495 cArgs = RT_ELEMENTS(g_aidxIemNativeCallRegs);
4496 else if (cArgs == 0)
4497 return true;
4498
4499 /*
 4500     * Do we get lucky and all registers are free and not shadowing anything?
4501 */
4502 if (((pReNative->Core.bmHstRegs | pReNative->Core.bmHstRegsWithGstShadow) & g_afIemNativeCallRegs[cArgs]) == 0)
4503 for (uint32_t i = 0; i < cArgs; i++)
4504 {
4505 uint8_t const idxReg = g_aidxIemNativeCallRegs[i];
4506 pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Arg;
4507 pReNative->Core.aHstRegs[idxReg].idxVar = UINT8_MAX;
4508 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
4509 }
4510 /*
4511 * Okay, not lucky so we have to free up the registers.
4512 */
4513 else
4514 for (uint32_t i = 0; i < cArgs; i++)
4515 {
4516 uint8_t const idxReg = g_aidxIemNativeCallRegs[i];
4517 if (pReNative->Core.bmHstRegs & RT_BIT_32(idxReg))
4518 {
4519 switch (pReNative->Core.aHstRegs[idxReg].enmWhat)
4520 {
4521 case kIemNativeWhat_Var:
4522 {
4523 uint8_t const idxVar = pReNative->Core.aHstRegs[idxReg].idxVar;
4524 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
4525 AssertStmt(IEMNATIVE_VAR_IDX_UNPACK(idxVar) < RT_ELEMENTS(pReNative->Core.aVars),
4526 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_5));
4527 Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxReg);
4528#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
4529 Assert(!pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg);
4530#endif
4531
4532 if (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].enmKind != kIemNativeVarKind_Stack)
4533 pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg = UINT8_MAX;
4534 else
4535 {
4536 off = iemNativeRegMoveOrSpillStackVar(pReNative, off, idxVar);
4537 Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
4538 }
4539 break;
4540 }
4541
4542 case kIemNativeWhat_Tmp:
4543 case kIemNativeWhat_Arg:
4544 case kIemNativeWhat_rc:
4545 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_5));
4546 default:
4547 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_6));
4548 }
4549
4550 }
4551 if (pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg))
4552 {
4553 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0);
4554 Assert( (pReNative->Core.aHstRegs[idxReg].fGstRegShadows & pReNative->Core.bmGstRegShadows)
4555 == pReNative->Core.aHstRegs[idxReg].fGstRegShadows);
4556 pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
4557 pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
4558 pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
4559 }
4560 else
4561 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
4562 pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Arg;
4563 pReNative->Core.aHstRegs[idxReg].idxVar = UINT8_MAX;
4564 }
4565 pReNative->Core.bmHstRegs |= g_afIemNativeCallRegs[cArgs];
4566 return true;
4567}
4568
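/* Example (illustrative sketch only, hypothetical call site): reserving the first two
   argument registers before loading them and emitting a helper call.  The function
   longjmps on failure, so the sketch does not check a status code; whether the caller
   uses the return value is left out here. */
#if 0
    iemNativeRegAllocArgs(pReNative, off, 2 /*cArgs*/);
    uint8_t const idxRegArg0 = g_aidxIemNativeCallRegs[0];
    uint8_t const idxRegArg1 = g_aidxIemNativeCallRegs[1];
    /* ... load idxRegArg0 / idxRegArg1 with the arguments and emit the call ... */
#endif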
4569
4570DECL_HIDDEN_THROW(uint8_t) iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg);
4571
4572
4573#if 0
4574/**
4575 * Frees a register assignment of any type.
4576 *
4577 * @param pReNative The native recompile state.
4578 * @param idxHstReg The register to free.
4579 *
4580 * @note Does not update variables.
4581 */
4582DECLHIDDEN(void) iemNativeRegFree(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT
4583{
4584 Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
4585 Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg));
4586 Assert(!(IEMNATIVE_REG_FIXED_MASK & RT_BIT_32(idxHstReg)));
4587 Assert( pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Var
4588 || pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Tmp
4589 || pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Arg
4590 || pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_rc);
4591 Assert( pReNative->Core.aHstRegs[idxHstReg].enmWhat != kIemNativeWhat_Var
4592 || pReNative->Core.aVars[pReNative->Core.aHstRegs[idxHstReg].idxVar].idxReg == UINT8_MAX
4593 || (pReNative->Core.bmVars & RT_BIT_32(pReNative->Core.aHstRegs[idxHstReg].idxVar)));
4594 Assert( (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows)
4595 == pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows);
4596 Assert( RT_BOOL(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg))
4597 == RT_BOOL(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));
4598
4599 pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
4600 /* no flushing, right:
4601 pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
4602 pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
4603 pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
4604 */
4605}
4606#endif
4607
4608
4609/**
4610 * Frees a temporary register.
4611 *
4612 * Any shadow copies of guest registers assigned to the host register will not
4613 * be flushed by this operation.
4614 */
4615DECLHIDDEN(void) iemNativeRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT
4616{
4617 Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg));
4618 Assert(pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Tmp);
4619 pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
4620 Log12(("iemNativeRegFreeTmp: %s (gst: %#RX64)\n",
4621 g_apszIemNativeHstRegNames[idxHstReg], pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));
4622}
4623
4624
4625/**
4626 * Frees a temporary immediate register.
4627 *
 4628  * It is assumed that the caller has not modified the register, so it still holds
4629 * the same value as when it was allocated via iemNativeRegAllocTmpImm().
4630 */
4631DECLHIDDEN(void) iemNativeRegFreeTmpImm(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT
4632{
4633 iemNativeRegFreeTmp(pReNative, idxHstReg);
4634}
4635
4636
4637/**
4638 * Frees a register assigned to a variable.
4639 *
4640 * The register will be disassociated from the variable.
4641 */
4642DECLHIDDEN(void) iemNativeRegFreeVar(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, bool fFlushShadows) RT_NOEXCEPT
4643{
4644 Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg));
4645 Assert(pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Var);
4646 uint8_t const idxVar = pReNative->Core.aHstRegs[idxHstReg].idxVar;
4647 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
4648 Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg);
4649#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
4650 Assert(!pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg);
4651#endif
4652
4653 pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg = UINT8_MAX;
4654 pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
4655 if (!fFlushShadows)
4656 Log12(("iemNativeRegFreeVar: %s (gst: %#RX64) idxVar=%#x\n",
4657 g_apszIemNativeHstRegNames[idxHstReg], pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows, idxVar));
4658 else
4659 {
4660 pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
4661 uint64_t const fGstRegShadowsOld = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
4662 pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
4663 pReNative->Core.bmGstRegShadows &= ~fGstRegShadowsOld;
4664 uint64_t fGstRegShadows = fGstRegShadowsOld;
4665 while (fGstRegShadows)
4666 {
4667 unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
4668 fGstRegShadows &= ~RT_BIT_64(idxGstReg);
4669
4670 Assert(pReNative->Core.aidxGstRegShadows[idxGstReg] == idxHstReg);
4671 pReNative->Core.aidxGstRegShadows[idxGstReg] = UINT8_MAX;
4672 }
4673 Log12(("iemNativeRegFreeVar: %s (gst: %#RX64 -> 0) idxVar=%#x\n",
4674 g_apszIemNativeHstRegNames[idxHstReg], fGstRegShadowsOld, idxVar));
4675 }
4676}
4677
4678
4679#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
4680# ifdef LOG_ENABLED
4681/** Host CPU SIMD register names. */
4682DECL_HIDDEN_CONST(const char * const) g_apszIemNativeHstSimdRegNames[] =
4683{
4684# ifdef RT_ARCH_AMD64
4685 "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"
4686# elif RT_ARCH_ARM64
4687 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
4688 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
4689# else
4690# error "port me"
4691# endif
4692};
4693# endif
4694
4695
4696/**
4697 * Frees a SIMD register assigned to a variable.
4698 *
4699 * The register will be disassociated from the variable.
4700 */
4701DECLHIDDEN(void) iemNativeSimdRegFreeVar(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, bool fFlushShadows) RT_NOEXCEPT
4702{
4703 Assert(pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxHstReg));
4704 Assert(pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_Var);
4705 uint8_t const idxVar = pReNative->Core.aHstSimdRegs[idxHstReg].idxVar;
4706 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
4707 Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg);
4708 Assert(pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg);
4709
4710 pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg = UINT8_MAX;
4711 pReNative->Core.bmHstSimdRegs &= ~RT_BIT_32(idxHstReg);
4712 if (!fFlushShadows)
4713 Log12(("iemNativeSimdRegFreeVar: %s (gst: %#RX64) idxVar=%#x\n",
4714 g_apszIemNativeHstSimdRegNames[idxHstReg], pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows, idxVar));
4715 else
4716 {
4717 pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
4718 uint64_t const fGstRegShadowsOld = pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows;
4719 pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows = 0;
4720 pReNative->Core.bmGstSimdRegShadows &= ~fGstRegShadowsOld;
4721 uint64_t fGstRegShadows = fGstRegShadowsOld;
4722 while (fGstRegShadows)
4723 {
4724 unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
4725 fGstRegShadows &= ~RT_BIT_64(idxGstReg);
4726
4727 Assert(pReNative->Core.aidxGstSimdRegShadows[idxGstReg] == idxHstReg);
4728 pReNative->Core.aidxGstSimdRegShadows[idxGstReg] = UINT8_MAX;
4729 }
4730 Log12(("iemNativeSimdRegFreeVar: %s (gst: %#RX64 -> 0) idxVar=%#x\n",
4731 g_apszIemNativeHstSimdRegNames[idxHstReg], fGstRegShadowsOld, idxVar));
4732 }
4733}
4734#endif
4735
4736
4737/**
4738 * Called right before emitting a call instruction to move anything important
4739 * out of call-volatile registers, free and flush the call-volatile registers,
4740 * optionally freeing argument variables.
4741 *
4742 * @returns New code buffer offset, UINT32_MAX on failure.
4743 * @param pReNative The native recompile state.
4744 * @param off The code buffer offset.
4745 * @param cArgs The number of arguments the function call takes.
 4746  *                      It is presumed that the host register part of these has
 4747  *                      been allocated as such already and won't need moving,
4748 * just freeing.
4749 * @param fKeepVars Mask of variables that should keep their register
4750 * assignments. Caller must take care to handle these.
4751 */
4752DECL_HIDDEN_THROW(uint32_t)
4753iemNativeRegMoveAndFreeAndFlushAtCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs, uint32_t fKeepVars /*= 0*/)
4754{
4755 Assert(cArgs <= IEMNATIVE_CALL_MAX_ARG_COUNT);
4756
4757 /* fKeepVars will reduce this mask. */
4758 uint32_t fRegsToFree = IEMNATIVE_CALL_VOLATILE_GREG_MASK;
4759
4760 /*
4761 * Move anything important out of volatile registers.
4762 */
4763 if (cArgs > RT_ELEMENTS(g_aidxIemNativeCallRegs))
4764 cArgs = RT_ELEMENTS(g_aidxIemNativeCallRegs);
4765 uint32_t fRegsToMove = IEMNATIVE_CALL_VOLATILE_GREG_MASK
4766#ifdef IEMNATIVE_REG_FIXED_TMP0
4767 & ~RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0)
4768#endif
4769#ifdef IEMNATIVE_REG_FIXED_TMP1
4770 & ~RT_BIT_32(IEMNATIVE_REG_FIXED_TMP1)
4771#endif
4772#ifdef IEMNATIVE_REG_FIXED_PC_DBG
4773 & ~RT_BIT_32(IEMNATIVE_REG_FIXED_PC_DBG)
4774#endif
4775 & ~g_afIemNativeCallRegs[cArgs];
4776
4777 fRegsToMove &= pReNative->Core.bmHstRegs;
4778 if (!fRegsToMove)
4779 { /* likely */ }
4780 else
4781 {
4782 Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: fRegsToMove=%#x\n", fRegsToMove));
4783 while (fRegsToMove != 0)
4784 {
4785 unsigned const idxReg = ASMBitFirstSetU32(fRegsToMove) - 1;
4786 fRegsToMove &= ~RT_BIT_32(idxReg);
4787
4788 switch (pReNative->Core.aHstRegs[idxReg].enmWhat)
4789 {
4790 case kIemNativeWhat_Var:
4791 {
4792 uint8_t const idxVar = pReNative->Core.aHstRegs[idxReg].idxVar;
4793 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
4794 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
4795 Assert(pVar->idxReg == idxReg);
4796 if (!(RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)) & fKeepVars))
4797 {
4798 Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: idxVar=%#x enmKind=%d idxReg=%d\n",
4799 idxVar, pVar->enmKind, pVar->idxReg));
4800 if (pVar->enmKind != kIemNativeVarKind_Stack)
4801 pVar->idxReg = UINT8_MAX;
4802 else
4803 off = iemNativeRegMoveOrSpillStackVar(pReNative, off, idxVar);
4804 }
4805 else
4806 fRegsToFree &= ~RT_BIT_32(idxReg);
4807 continue;
4808 }
4809
4810 case kIemNativeWhat_Arg:
4811 AssertMsgFailed(("What?!?: %u\n", idxReg));
4812 continue;
4813
4814 case kIemNativeWhat_rc:
4815 case kIemNativeWhat_Tmp:
4816 AssertMsgFailed(("Missing free: %u\n", idxReg));
4817 continue;
4818
4819 case kIemNativeWhat_FixedTmp:
4820 case kIemNativeWhat_pVCpuFixed:
4821 case kIemNativeWhat_pCtxFixed:
4822 case kIemNativeWhat_PcShadow:
4823 case kIemNativeWhat_FixedReserved:
4824 case kIemNativeWhat_Invalid:
4825 case kIemNativeWhat_End:
4826 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_1));
4827 }
4828 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_2));
4829 }
4830 }
4831
4832 /*
4833 * Do the actual freeing.
4834 */
4835 if (pReNative->Core.bmHstRegs & fRegsToFree)
4836 Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: bmHstRegs %#x -> %#x\n",
4837 pReNative->Core.bmHstRegs, pReNative->Core.bmHstRegs & ~fRegsToFree));
4838 pReNative->Core.bmHstRegs &= ~fRegsToFree;
4839
4840 /* If there are guest register shadows in any call-volatile register, we
 4841        have to clear the corresponding guest register masks for each register. */
4842 uint32_t fHstRegsWithGstShadow = pReNative->Core.bmHstRegsWithGstShadow & fRegsToFree;
4843 if (fHstRegsWithGstShadow)
4844 {
4845 Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: bmHstRegsWithGstShadow %#RX32 -> %#RX32; removed %#RX32\n",
4846 pReNative->Core.bmHstRegsWithGstShadow, pReNative->Core.bmHstRegsWithGstShadow & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK, fHstRegsWithGstShadow));
4847 pReNative->Core.bmHstRegsWithGstShadow &= ~fHstRegsWithGstShadow;
4848 do
4849 {
4850 unsigned const idxReg = ASMBitFirstSetU32(fHstRegsWithGstShadow) - 1;
4851 fHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
4852
4853 AssertMsg(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0, ("idxReg=%#x\n", idxReg));
4854 pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
4855 pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
4856 } while (fHstRegsWithGstShadow != 0);
4857 }
4858
4859 return off;
4860}
4861
4862
4863/**
4864 * Flushes a set of guest register shadow copies.
4865 *
4866 * This is usually done after calling a threaded function or a C-implementation
4867 * of an instruction.
4868 *
4869 * @param pReNative The native recompile state.
4870 * @param fGstRegs Set of guest registers to flush.
4871 */
4872DECLHIDDEN(void) iemNativeRegFlushGuestShadows(PIEMRECOMPILERSTATE pReNative, uint64_t fGstRegs) RT_NOEXCEPT
4873{
4874 /*
4875 * Reduce the mask by what's currently shadowed
4876 */
4877 uint64_t const bmGstRegShadowsOld = pReNative->Core.bmGstRegShadows;
4878 fGstRegs &= bmGstRegShadowsOld;
4879 if (fGstRegs)
4880 {
4881 uint64_t const bmGstRegShadowsNew = bmGstRegShadowsOld & ~fGstRegs;
4882 Log12(("iemNativeRegFlushGuestShadows: flushing %#RX64 (%#RX64 -> %#RX64)\n", fGstRegs, bmGstRegShadowsOld, bmGstRegShadowsNew));
4883 pReNative->Core.bmGstRegShadows = bmGstRegShadowsNew;
4884 if (bmGstRegShadowsNew)
4885 {
4886 /*
4887 * Partial.
4888 */
4889 do
4890 {
4891 unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegs) - 1;
4892 uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
4893 Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aidxGstRegShadows));
4894 Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg));
4895 Assert(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg));
4896
4897 uint64_t const fInThisHstReg = (pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & fGstRegs) | RT_BIT_64(idxGstReg);
4898 fGstRegs &= ~fInThisHstReg;
4899 uint64_t const fGstRegShadowsNew = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & ~fInThisHstReg;
4900 pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = fGstRegShadowsNew;
4901 if (!fGstRegShadowsNew)
4902 pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
4903 } while (fGstRegs != 0);
4904 }
4905 else
4906 {
4907 /*
4908 * Clear all.
4909 */
4910 do
4911 {
4912 unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegs) - 1;
4913 uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
4914 Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aidxGstRegShadows));
4915 Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg));
4916 Assert(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg));
4917
4918 fGstRegs &= ~(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows | RT_BIT_64(idxGstReg));
4919 pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
4920 } while (fGstRegs != 0);
4921 pReNative->Core.bmHstRegsWithGstShadow = 0;
4922 }
4923 }
4924}
4925
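/* Example (illustrative sketch only, hypothetical call site): dropping the shadow copy of
   the guest PC after a call that may have changed it in the CPU context, so the next
   access reloads it.  Other kIemNativeGstReg_XXX bits could be ORed into the mask. */
#if 0
    iemNativeRegFlushGuestShadows(pReNative, RT_BIT_64(kIemNativeGstReg_Pc));
#endif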
4926
4927/**
4928 * Flushes guest register shadow copies held by a set of host registers.
4929 *
4930 * This is used with the TLB lookup code for ensuring that we don't carry on
4931 * with any guest shadows in volatile registers, as these will get corrupted by
4932 * a TLB miss.
4933 *
4934 * @param pReNative The native recompile state.
4935 * @param fHstRegs Set of host registers to flush guest shadows for.
4936 */
4937DECLHIDDEN(void) iemNativeRegFlushGuestShadowsByHostMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegs) RT_NOEXCEPT
4938{
4939 /*
4940 * Reduce the mask by what's currently shadowed.
4941 */
4942 uint32_t const bmHstRegsWithGstShadowOld = pReNative->Core.bmHstRegsWithGstShadow;
4943 fHstRegs &= bmHstRegsWithGstShadowOld;
4944 if (fHstRegs)
4945 {
4946 uint32_t const bmHstRegsWithGstShadowNew = bmHstRegsWithGstShadowOld & ~fHstRegs;
4947 Log12(("iemNativeRegFlushGuestShadowsByHostMask: flushing %#RX32 (%#RX32 -> %#RX32)\n",
4948 fHstRegs, bmHstRegsWithGstShadowOld, bmHstRegsWithGstShadowNew));
4949 pReNative->Core.bmHstRegsWithGstShadow = bmHstRegsWithGstShadowNew;
4950 if (bmHstRegsWithGstShadowNew)
4951 {
4952 /*
4953 * Partial (likely).
4954 */
4955 uint64_t fGstShadows = 0;
4956 do
4957 {
4958 unsigned const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
4959 Assert(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg)));
4960 Assert( (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows)
4961 == pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows);
4962
4963 fGstShadows |= pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
4964 pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
4965 fHstRegs &= ~RT_BIT_32(idxHstReg);
4966 } while (fHstRegs != 0);
4967 pReNative->Core.bmGstRegShadows &= ~fGstShadows;
4968 }
4969 else
4970 {
4971 /*
4972 * Clear all.
4973 */
4974 do
4975 {
4976 unsigned const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
4977 Assert(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg)));
4978 Assert( (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows)
4979 == pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows);
4980
4981 pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
4982 fHstRegs &= ~RT_BIT_32(idxHstReg);
4983 } while (fHstRegs != 0);
4984 pReNative->Core.bmGstRegShadows = 0;
4985 }
4986 }
4987}
4988
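/* Example (illustrative sketch only, hypothetical call site): before emitting a TLB
   lookup, drop guest shadows from all call-volatile host registers since the TLB miss
   path will clobber them. */
#if 0
    iemNativeRegFlushGuestShadowsByHostMask(pReNative, IEMNATIVE_CALL_VOLATILE_GREG_MASK);
#endif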
4989
4990/**
4991 * Restores guest shadow copies in volatile registers.
4992 *
4993 * This is used after calling a helper function (think TLB miss) to restore the
4994 * register state of volatile registers.
4995 *
4996 * @param pReNative The native recompile state.
4997 * @param off The code buffer offset.
4998 * @param fHstRegsActiveShadows Set of host registers which are allowed to
4999 * be active (allocated) w/o asserting. Hack.
5000 * @see iemNativeVarSaveVolatileRegsPreHlpCall(),
5001 * iemNativeVarRestoreVolatileRegsPostHlpCall()
5002 */
5003DECL_HIDDEN_THROW(uint32_t)
5004iemNativeRegRestoreGuestShadowsInVolatileRegs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fHstRegsActiveShadows)
5005{
5006 uint32_t fHstRegs = pReNative->Core.bmHstRegsWithGstShadow & IEMNATIVE_CALL_VOLATILE_GREG_MASK;
5007 if (fHstRegs)
5008 {
5009 Log12(("iemNativeRegRestoreGuestShadowsInVolatileRegs: %#RX32\n", fHstRegs));
5010 do
5011 {
5012 unsigned const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
5013
 5014             /* It's not fatal if a register is active holding a variable that
 5015                shadows a guest register, ASSUMING all pending guest register
 5016                writes were flushed prior to the helper call.  However, we'll be
 5017                emitting duplicate restores, so it wastes code space. */
5018 Assert(!(pReNative->Core.bmHstRegs & ~fHstRegsActiveShadows & RT_BIT_32(idxHstReg)));
5019 RT_NOREF(fHstRegsActiveShadows);
5020
5021 uint64_t const fGstRegShadows = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
5022 Assert((pReNative->Core.bmGstRegShadows & fGstRegShadows) == fGstRegShadows);
5023 AssertStmt(fGstRegShadows != 0 && fGstRegShadows < RT_BIT_64(kIemNativeGstReg_End),
5024 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_12));
5025
5026 unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
5027 off = iemNativeEmitLoadGprWithGstShadowReg(pReNative, off, idxHstReg, (IEMNATIVEGSTREG)idxGstReg);
5028
5029 fHstRegs &= ~RT_BIT_32(idxHstReg);
5030 } while (fHstRegs != 0);
5031 }
5032 return off;
5033}
5034
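/* Example (illustrative sketch only, hypothetical call site): the usual bracket around a
   helper call - flush/free the call-volatile registers, emit the call, then restore any
   guest shadows that lived in volatile registers.  The actual argument loading and call
   emission in the middle is only indicated by a comment. */
#if 0
    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 4 /*cArgs*/);
    /* ... load the argument registers and emit the helper call here ... */
    off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, 0 /*fHstRegsActiveShadows*/);
#endif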
5035
5036
5037
5038/*********************************************************************************************************************************
5039* SIMD register allocator (largely code duplication of the GPR allocator for now but might diverge) *
5040*********************************************************************************************************************************/
5041#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
5042
5043/**
5044 * Info about shadowed guest SIMD register values.
5045 * @see IEMNATIVEGSTSIMDREG
5046 */
5047static struct
5048{
5049 /** Offset in VMCPU of XMM (low 128-bit) registers. */
5050 uint32_t offXmm;
5051 /** Offset in VMCPU of YmmHi (high 128-bit) registers. */
5052 uint32_t offYmm;
5053 /** Name (for logging). */
5054 const char *pszName;
5055} const g_aGstSimdShadowInfo[] =
5056{
5057#define CPUMCTX_OFF_AND_SIZE(a_iSimdReg) (uint32_t)RT_UOFFSETOF(VMCPU, cpum.GstCtx.XState.x87.aXMM[a_iSimdReg]), \
5058 (uint32_t)RT_UOFFSETOF(VMCPU, cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iSimdReg])
5059 /* [kIemNativeGstSimdReg_SimdRegFirst + 0] = */ { CPUMCTX_OFF_AND_SIZE(0), "ymm0", },
5060 /* [kIemNativeGstSimdReg_SimdRegFirst + 1] = */ { CPUMCTX_OFF_AND_SIZE(1), "ymm1", },
5061 /* [kIemNativeGstSimdReg_SimdRegFirst + 2] = */ { CPUMCTX_OFF_AND_SIZE(2), "ymm2", },
5062 /* [kIemNativeGstSimdReg_SimdRegFirst + 3] = */ { CPUMCTX_OFF_AND_SIZE(3), "ymm3", },
5063 /* [kIemNativeGstSimdReg_SimdRegFirst + 4] = */ { CPUMCTX_OFF_AND_SIZE(4), "ymm4", },
5064 /* [kIemNativeGstSimdReg_SimdRegFirst + 5] = */ { CPUMCTX_OFF_AND_SIZE(5), "ymm5", },
5065 /* [kIemNativeGstSimdReg_SimdRegFirst + 6] = */ { CPUMCTX_OFF_AND_SIZE(6), "ymm6", },
5066 /* [kIemNativeGstSimdReg_SimdRegFirst + 7] = */ { CPUMCTX_OFF_AND_SIZE(7), "ymm7", },
5067 /* [kIemNativeGstSimdReg_SimdRegFirst + 8] = */ { CPUMCTX_OFF_AND_SIZE(8), "ymm8", },
5068 /* [kIemNativeGstSimdReg_SimdRegFirst + 9] = */ { CPUMCTX_OFF_AND_SIZE(9), "ymm9", },
5069 /* [kIemNativeGstSimdReg_SimdRegFirst + 10] = */ { CPUMCTX_OFF_AND_SIZE(10), "ymm10", },
5070 /* [kIemNativeGstSimdReg_SimdRegFirst + 11] = */ { CPUMCTX_OFF_AND_SIZE(11), "ymm11", },
5071 /* [kIemNativeGstSimdReg_SimdRegFirst + 12] = */ { CPUMCTX_OFF_AND_SIZE(12), "ymm12", },
5072 /* [kIemNativeGstSimdReg_SimdRegFirst + 13] = */ { CPUMCTX_OFF_AND_SIZE(13), "ymm13", },
5073 /* [kIemNativeGstSimdReg_SimdRegFirst + 14] = */ { CPUMCTX_OFF_AND_SIZE(14), "ymm14", },
5074 /* [kIemNativeGstSimdReg_SimdRegFirst + 15] = */ { CPUMCTX_OFF_AND_SIZE(15), "ymm15", },
5075#undef CPUMCTX_OFF_AND_SIZE
5076};
5077AssertCompile(RT_ELEMENTS(g_aGstSimdShadowInfo) == kIemNativeGstSimdReg_End);
5078
5079
5080/**
5081 * Frees a temporary SIMD register.
5082 *
5083 * Any shadow copies of guest registers assigned to the host register will not
5084 * be flushed by this operation.
5085 */
5086DECLHIDDEN(void) iemNativeSimdRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstSimdReg) RT_NOEXCEPT
5087{
5088 Assert(pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxHstSimdReg));
5089 Assert(pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmWhat == kIemNativeWhat_Tmp);
5090 pReNative->Core.bmHstSimdRegs &= ~RT_BIT_32(idxHstSimdReg);
5091 Log12(("iemNativeSimdRegFreeTmp: %s (gst: %#RX64)\n",
5092 g_apszIemNativeHstSimdRegNames[idxHstSimdReg], pReNative->Core.aHstSimdRegs[idxHstSimdReg].fGstRegShadows));
5093}
5094
5095
5096/**
5097 * Emits code to flush a pending write of the given SIMD register if any, also flushes the guest to host SIMD register association.
5098 *
 5099  * @returns New code buffer offset.
5100 * @param pReNative The native recompile state.
5101 * @param off Current code buffer position.
5102 * @param enmGstSimdReg The guest SIMD register to flush.
5103 */
5104DECL_HIDDEN_THROW(uint32_t)
5105iemNativeSimdRegFlushPendingWrite(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEGSTSIMDREG enmGstSimdReg)
5106{
5107 uint8_t const idxHstSimdReg = pReNative->Core.aidxGstSimdRegShadows[enmGstSimdReg];
5108
5109 Log12(("iemNativeSimdRegFlushPendingWrite: Clearing guest register %s shadowed by host %s with state DirtyLo:%u DirtyHi:%u\n",
5110 g_aGstSimdShadowInfo[enmGstSimdReg].pszName, g_apszIemNativeHstSimdRegNames[idxHstSimdReg],
5111 IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_LO_U128(pReNative, enmGstSimdReg),
5112 IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_HI_U128(pReNative, enmGstSimdReg)));
5113
5114 if (IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_LO_U128(pReNative, enmGstSimdReg))
5115 {
5116 Assert( pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_256
5117 || pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_Low128);
5118 off = iemNativeEmitSimdStoreVecRegToVCpuLowU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offXmm);
5119 }
5120
5121 if (IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_HI_U128(pReNative, enmGstSimdReg))
5122 {
5123 Assert( pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_256
5124 || pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_High128);
5125 off = iemNativeEmitSimdStoreVecRegToVCpuHighU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offYmm);
5126 }
5127
5128 IEMNATIVE_SIMD_REG_STATE_CLR_DIRTY(pReNative, enmGstSimdReg);
5129 return off;
5130}
5131
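/* Example (illustrative sketch only, hypothetical call site): flushing any pending write
   of guest ymm0 back to the CPU context before code that reads the register directly
   from CPUMCTX. */
#if 0
    off = iemNativeSimdRegFlushPendingWrite(pReNative, off, IEMNATIVEGSTSIMDREG_SIMD(0));
#endif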
5132
5133/**
5134 * Locate a register, possibly freeing one up.
5135 *
5136 * This ASSUMES the caller has done the minimal/optimal allocation checks and
5137 * failed.
5138 *
5139 * @returns Host register number on success. Returns UINT8_MAX if no registers
 5140  *          found, the caller is supposed to deal with this and raise an
 5141  *          allocation-type specific status code (if desired).
5142 *
 5143  * @throws  VBox status code if we run into trouble spilling a variable or
 5144  *          recording debug info.  Does NOT throw anything if we're out of
5145 * registers, though.
5146 */
5147static uint8_t iemNativeSimdRegAllocFindFree(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile,
5148 uint32_t fRegMask = IEMNATIVE_HST_SIMD_REG_MASK & ~IEMNATIVE_SIMD_REG_FIXED_MASK)
5149{
5150 //STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFree);
5151 Assert(!(fRegMask & ~IEMNATIVE_HST_SIMD_REG_MASK));
5152 Assert(!(fRegMask & IEMNATIVE_SIMD_REG_FIXED_MASK));
5153
5154 /*
5155 * Try a freed register that's shadowing a guest register.
5156 */
5157 uint32_t fRegs = ~pReNative->Core.bmHstSimdRegs & fRegMask;
5158 if (fRegs)
5159 {
5160 //STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeNoVar);
5161
5162#if 0 /** @todo def IEMNATIVE_WITH_LIVENESS_ANALYSIS */
5163 /*
 5164      * When we have liveness information, we use it to kick out all shadowed
 5165      * guest registers that will not be needed any more in this TB.  If we're
5166 * lucky, this may prevent us from ending up here again.
5167 *
5168 * Note! We must consider the previous entry here so we don't free
5169 * anything that the current threaded function requires (current
5170 * entry is produced by the next threaded function).
5171 */
5172 uint32_t const idxCurCall = pReNative->idxCurCall;
5173 if (idxCurCall > 0)
5174 {
5175 PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[idxCurCall - 1];
5176
5177# ifndef IEMLIVENESS_EXTENDED_LAYOUT
5178 /* Construct a mask of the guest registers in the UNUSED and XCPT_OR_CALL state. */
5179 AssertCompile(IEMLIVENESS_STATE_UNUSED == 1 && IEMLIVENESS_STATE_XCPT_OR_CALL == 2);
5180 uint64_t fToFreeMask = pLivenessEntry->Bit0.bm64 ^ pLivenessEntry->Bit1.bm64; /* mask of regs in either UNUSED */
5181#else
5182 /* Construct a mask of the registers not in the read or write state.
 5183            Note! We could skip writes, if they aren't from us, as this is just
5184 a hack to prevent trashing registers that have just been written
5185 or will be written when we retire the current instruction. */
5186 uint64_t fToFreeMask = ~pLivenessEntry->aBits[IEMLIVENESS_BIT_READ].bm64
5187 & ~pLivenessEntry->aBits[IEMLIVENESS_BIT_WRITE].bm64
5188 & IEMLIVENESSBIT_MASK;
5189#endif
5190 /* If it matches any shadowed registers. */
5191 if (pReNative->Core.bmGstRegShadows & fToFreeMask)
5192 {
5193 STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed);
5194 iemNativeRegFlushGuestShadows(pReNative, fToFreeMask);
5195 Assert(fRegs == (~pReNative->Core.bmHstRegs & fRegMask)); /* this shall not change. */
5196
5197 /* See if we've got any unshadowed registers we can return now. */
5198 uint32_t const fUnshadowedRegs = fRegs & ~pReNative->Core.bmHstRegsWithGstShadow;
5199 if (fUnshadowedRegs)
5200 {
5201 STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped);
5202 return (fPreferVolatile
5203 ? ASMBitFirstSetU32(fUnshadowedRegs)
5204 : ASMBitLastSetU32( fUnshadowedRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
5205 ? fUnshadowedRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fUnshadowedRegs))
5206 - 1;
5207 }
5208 }
5209 }
5210#endif /* IEMNATIVE_WITH_LIVENESS_ANALYSIS */
5211
5212 unsigned const idxReg = (fPreferVolatile
5213 ? ASMBitFirstSetU32(fRegs)
5214 : ASMBitLastSetU32( fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
5215 ? fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs))
5216 - 1;
5217
5218 Assert(pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows != 0);
5219 Assert( (pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows & pReNative->Core.bmGstSimdRegShadows)
5220 == pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows);
5221 Assert(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxReg));
5222
5223 /* We need to flush any pending guest register writes this host SIMD register shadows. */
5224 uint32_t fGstRegShadows = pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows;
5225 uint32_t idxGstSimdReg = 0;
5226 do
5227 {
5228 if (fGstRegShadows & 0x1)
5229 {
5230 *poff = iemNativeSimdRegFlushPendingWrite(pReNative, *poff, IEMNATIVEGSTSIMDREG_SIMD(idxGstSimdReg));
5231 Assert(!IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_U256(pReNative, idxGstSimdReg));
5232 }
5233 idxGstSimdReg++;
5234 fGstRegShadows >>= 1;
5235 } while (fGstRegShadows);
5236
5237 pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxReg);
5238 pReNative->Core.bmGstSimdRegShadows &= ~pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows;
5239 pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows = 0;
5240 pReNative->Core.aHstSimdRegs[idxReg].enmLoaded = kIemNativeGstSimdRegLdStSz_Invalid;
5241 return idxReg;
5242 }
5243
5244 /*
5245 * Try free up a variable that's in a register.
5246 *
 5247  * We do two rounds here, first evacuating variables that don't need to be
 5248  * saved on the stack, then in the second round moving things to the stack.
5249 */
5250 //STAM_REL_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeRegFindFreeVar);
5251 AssertReleaseFailed(); /** @todo No variable support right now. */
5252#if 0
5253 for (uint32_t iLoop = 0; iLoop < 2; iLoop++)
5254 {
5255 uint32_t fVars = pReNative->Core.bmSimdVars;
5256 while (fVars)
5257 {
5258 uint32_t const idxVar = ASMBitFirstSetU32(fVars) - 1;
5259 uint8_t const idxReg = pReNative->Core.aSimdVars[idxVar].idxReg;
5260 if ( idxReg < RT_ELEMENTS(pReNative->Core.aHstSimdRegs)
5261 && (RT_BIT_32(idxReg) & fRegMask)
5262 && ( iLoop == 0
5263 ? pReNative->Core.aSimdVars[idxVar].enmKind != kIemNativeVarKind_Stack
5264 : pReNative->Core.aSimdVars[idxVar].enmKind == kIemNativeVarKind_Stack)
5265 && !pReNative->Core.aSimdVars[idxVar].fRegAcquired)
5266 {
5267 Assert(pReNative->Core.bmHstRegs & RT_BIT_32(idxReg));
5268 Assert( (pReNative->Core.bmGstSimdRegShadows & pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows)
5269 == pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows);
5270 Assert(pReNative->Core.bmGstSimdRegShadows < RT_BIT_64(kIemNativeGstReg_End));
5271 Assert( RT_BOOL(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg))
5272 == RT_BOOL(pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows));
5273
5274 if (pReNative->Core.aSimdVars[idxVar].enmKind == kIemNativeVarKind_Stack)
5275 {
5276 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, IEMNATIVE_VAR_IDX_PACK(idxVar));
5277 *poff = iemNativeEmitStoreGprByBp(pReNative, *poff, iemNativeStackCalcBpDisp(idxStackSlot), idxReg);
5278 }
5279
5280 pReNative->Core.aSimdVars[idxVar].idxReg = UINT8_MAX;
5281 pReNative->Core.bmHstSimdRegs &= ~RT_BIT_32(idxReg);
5282
5283 pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxReg);
5284 pReNative->Core.bmGstSimdRegShadows &= ~pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows;
5285 pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows = 0;
5286 return idxReg;
5287 }
5288 fVars &= ~RT_BIT_32(idxVar);
5289 }
5290 }
5291#endif
5292
5293 AssertFailed();
5294 return UINT8_MAX;
5295}
5296
5297
5298/**
5299 * Flushes a set of guest register shadow copies.
5300 *
5301 * This is usually done after calling a threaded function or a C-implementation
5302 * of an instruction.
5303 *
5304 * @param pReNative The native recompile state.
5305 * @param fGstSimdRegs Set of guest SIMD registers to flush.
5306 */
5307DECLHIDDEN(void) iemNativeSimdRegFlushGuestShadows(PIEMRECOMPILERSTATE pReNative, uint64_t fGstSimdRegs) RT_NOEXCEPT
5308{
5309 /*
5310 * Reduce the mask by what's currently shadowed
5311 */
5312 uint64_t const bmGstSimdRegShadows = pReNative->Core.bmGstSimdRegShadows;
5313 fGstSimdRegs &= bmGstSimdRegShadows;
5314 if (fGstSimdRegs)
5315 {
5316 uint64_t const bmGstSimdRegShadowsNew = bmGstSimdRegShadows & ~fGstSimdRegs;
5317 Log12(("iemNativeSimdRegFlushGuestShadows: flushing %#RX64 (%#RX64 -> %#RX64)\n", fGstSimdRegs, bmGstSimdRegShadows, bmGstSimdRegShadowsNew));
5318 pReNative->Core.bmGstSimdRegShadows = bmGstSimdRegShadowsNew;
5319 if (bmGstSimdRegShadowsNew)
5320 {
5321 /*
5322 * Partial.
5323 */
5324 do
5325 {
5326 unsigned const idxGstReg = ASMBitFirstSetU64(fGstSimdRegs) - 1;
5327 uint8_t const idxHstReg = pReNative->Core.aidxGstSimdRegShadows[idxGstReg];
5328 Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aidxGstSimdRegShadows));
5329 Assert(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxHstReg));
5330 Assert(pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg));
5331 Assert(!IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_U256(pReNative, idxGstReg));
5332
5333 uint64_t const fInThisHstReg = (pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows & fGstSimdRegs) | RT_BIT_64(idxGstReg);
5334 fGstSimdRegs &= ~fInThisHstReg;
5335 uint64_t const fGstRegShadowsNew = pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows & ~fInThisHstReg;
5336 pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows = fGstRegShadowsNew;
5337 if (!fGstRegShadowsNew)
5338 {
5339 pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
5340 pReNative->Core.aHstSimdRegs[idxHstReg].enmLoaded = kIemNativeGstSimdRegLdStSz_Invalid;
5341 }
5342 } while (fGstSimdRegs != 0);
5343 }
5344 else
5345 {
5346 /*
5347 * Clear all.
5348 */
5349 do
5350 {
5351 unsigned const idxGstReg = ASMBitFirstSetU64(fGstSimdRegs) - 1;
5352 uint8_t const idxHstReg = pReNative->Core.aidxGstSimdRegShadows[idxGstReg];
5353 Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aidxGstSimdRegShadows));
5354 Assert(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxHstReg));
5355 Assert(pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg));
5356 Assert(!IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_U256(pReNative, idxGstReg));
5357
5358 fGstSimdRegs &= ~(pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows | RT_BIT_64(idxGstReg));
5359 pReNative->Core.aHstSimdRegs[idxHstReg].fGstRegShadows = 0;
5360 pReNative->Core.aHstSimdRegs[idxHstReg].enmLoaded = kIemNativeGstSimdRegLdStSz_Invalid;
5361 } while (fGstSimdRegs != 0);
5362 pReNative->Core.bmHstSimdRegsWithGstShadow = 0;
5363 }
5364 }
5365}
5366
5367
5368/**
5369 * Allocates a temporary host SIMD register.
5370 *
5371 * This may emit code to save register content onto the stack in order to free
5372 * up a register.
5373 *
5374 * @returns The host register number; throws VBox status code on failure,
5375 * so no need to check the return value.
5376 * @param pReNative The native recompile state.
5377 * @param poff Pointer to the variable with the code buffer position.
 5378  *                      This will be updated if we need to move a variable from
5379 * register to stack in order to satisfy the request.
5380 * @param fPreferVolatile Whether to prefer volatile over non-volatile
5381 * registers (@c true, default) or the other way around
5382 * (@c false, for iemNativeRegAllocTmpForGuestReg()).
5383 */
5384DECL_HIDDEN_THROW(uint8_t) iemNativeSimdRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile /*= true*/)
5385{
5386 /*
 5387      * Try to find a completely unused register, preferably a call-volatile one.
5388 */
5389 uint8_t idxSimdReg;
 5390     uint32_t fRegs = ~pReNative->Core.bmHstSimdRegs
 5391                    & ~pReNative->Core.bmHstSimdRegsWithGstShadow
5392 & (~IEMNATIVE_SIMD_REG_FIXED_MASK & IEMNATIVE_HST_SIMD_REG_MASK);
5393 if (fRegs)
5394 {
5395 if (fPreferVolatile)
5396 idxSimdReg = (uint8_t)ASMBitFirstSetU32( fRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
5397 ? fRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs) - 1;
5398 else
5399 idxSimdReg = (uint8_t)ASMBitFirstSetU32( fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
5400 ? fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs) - 1;
5401 Assert(pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows == 0);
5402 Assert(!(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxSimdReg)));
5403 Log12(("iemNativeSimdRegAllocTmp: %s\n", g_apszIemNativeHstSimdRegNames[idxSimdReg]));
5404 }
5405 else
5406 {
5407 idxSimdReg = iemNativeSimdRegAllocFindFree(pReNative, poff, fPreferVolatile);
5408 AssertStmt(idxSimdReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP));
5409 Log12(("iemNativeSimdRegAllocTmp: %s (slow)\n", g_apszIemNativeHstSimdRegNames[idxSimdReg]));
5410 }
5411
5412 Assert(pReNative->Core.aHstSimdRegs[idxSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_Invalid);
5413 return iemNativeSimdRegMarkAllocated(pReNative, idxSimdReg, kIemNativeWhat_Tmp);
5414}
5415
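/* Example (illustrative sketch only, hypothetical call site): grabbing a scratch SIMD
   register, emitting some code with it and releasing it again. */
#if 0
    uint8_t const idxSimdTmp = iemNativeSimdRegAllocTmp(pReNative, &off);
    /* ... emit SIMD code using idxSimdTmp ... */
    iemNativeSimdRegFreeTmp(pReNative, idxSimdTmp);
#endif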
5416
5417/**
5418 * Alternative version of iemNativeSimdRegAllocTmp that takes mask with acceptable
5419 * registers.
5420 *
5421 * @returns The host register number; throws VBox status code on failure,
5422 * so no need to check the return value.
5423 * @param pReNative The native recompile state.
5424 * @param poff Pointer to the variable with the code buffer position.
 5425  *                      This will be updated if we need to move a variable from
5426 * register to stack in order to satisfy the request.
5427 * @param fRegMask Mask of acceptable registers.
5428 * @param fPreferVolatile Whether to prefer volatile over non-volatile
5429 * registers (@c true, default) or the other way around
5430 * (@c false, for iemNativeRegAllocTmpForGuestReg()).
5431 */
5432DECL_HIDDEN_THROW(uint8_t) iemNativeSimdRegAllocTmpEx(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint32_t fRegMask,
5433 bool fPreferVolatile /*= true*/)
5434{
5435 Assert(!(fRegMask & ~IEMNATIVE_HST_SIMD_REG_MASK));
5436 Assert(!(fRegMask & IEMNATIVE_SIMD_REG_FIXED_MASK));
5437
5438 /*
 5439      * Try to find a completely unused register, preferably a call-volatile one.
5440 */
5441 uint8_t idxSimdReg;
5442 uint32_t fRegs = ~pReNative->Core.bmHstSimdRegs
5443 & ~pReNative->Core.bmHstSimdRegsWithGstShadow
5444 & (~IEMNATIVE_SIMD_REG_FIXED_MASK & IEMNATIVE_HST_SIMD_REG_MASK)
5445 & fRegMask;
5446 if (fRegs)
5447 {
5448 if (fPreferVolatile)
5449 idxSimdReg = (uint8_t)ASMBitFirstSetU32( fRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
5450 ? fRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs) - 1;
5451 else
5452 idxSimdReg = (uint8_t)ASMBitFirstSetU32( fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
5453 ? fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs) - 1;
5454 Assert(pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows == 0);
5455 Assert(!(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxSimdReg)));
5456 Log12(("iemNativeSimdRegAllocTmpEx: %s\n", g_apszIemNativeHstSimdRegNames[idxSimdReg]));
5457 }
5458 else
5459 {
5460 idxSimdReg = iemNativeSimdRegAllocFindFree(pReNative, poff, fPreferVolatile, fRegMask);
5461 AssertStmt(idxSimdReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP));
5462 Log12(("iemNativeSimdRegAllocTmpEx: %s (slow)\n", g_apszIemNativeHstSimdRegNames[idxSimdReg]));
5463 }
5464
5465 Assert(pReNative->Core.aHstSimdRegs[idxSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_Invalid);
5466 return iemNativeSimdRegMarkAllocated(pReNative, idxSimdReg, kIemNativeWhat_Tmp);
5467}
5468
5469
5470/**
 5471  * Sets the indicator for which part of the given SIMD register has valid data loaded.
5472 *
5473 * @param pReNative The native recompile state.
5474 * @param idxHstSimdReg The host SIMD register to update the state for.
5475 * @param enmLoadSz The load size to set.
5476 */
5477DECL_FORCE_INLINE(void) iemNativeSimdRegSetValidLoadFlag(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstSimdReg,
5478 IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz)
5479{
5480 /* Everything valid already? -> nothing to do. */
5481 if (pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_256)
5482 return;
5483
5484 if (pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_Invalid)
5485 pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded = enmLoadSz;
5486 else if (pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded != enmLoadSz)
5487 {
5488 Assert( ( pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_Low128
5489 && enmLoadSz == kIemNativeGstSimdRegLdStSz_High128)
5490 || ( pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded == kIemNativeGstSimdRegLdStSz_High128
5491 && enmLoadSz == kIemNativeGstSimdRegLdStSz_Low128));
5492 pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded = kIemNativeGstSimdRegLdStSz_256;
5493 }
5494}
5495
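/* Example (illustrative sketch; idxHstSimdReg is a hypothetical register index): marking
   the low and the high 128-bit halves as loaded separately leaves the register in the
   256-bit valid state, per the merging logic above. */
#if 0
    iemNativeSimdRegSetValidLoadFlag(pReNative, idxHstSimdReg, kIemNativeGstSimdRegLdStSz_Low128);
    iemNativeSimdRegSetValidLoadFlag(pReNative, idxHstSimdReg, kIemNativeGstSimdRegLdStSz_High128);
    /* pReNative->Core.aHstSimdRegs[idxHstSimdReg].enmLoaded is now kIemNativeGstSimdRegLdStSz_256. */
#endif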
5496
5497static uint32_t iemNativeSimdRegAllocLoadVecRegFromVecRegSz(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstSimdRegDst,
5498 uint8_t idxHstSimdRegSrc, IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSzDst)
5499{
5500 /* Easy case first, either the destination loads the same range as what the source has already loaded or the source has loaded everything. */
5501 if ( pReNative->Core.aHstSimdRegs[idxHstSimdRegSrc].enmLoaded == enmLoadSzDst
5502 || pReNative->Core.aHstSimdRegs[idxHstSimdRegSrc].enmLoaded == kIemNativeGstSimdRegLdStSz_256)
5503 {
5504# ifdef RT_ARCH_ARM64
5505 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
5506 Assert(!(idxHstSimdRegDst & 0x1)); Assert(!(idxHstSimdRegSrc & 0x1));
5507# endif
5508
5509 if (idxHstSimdRegDst != idxHstSimdRegSrc)
5510 {
5511 switch (enmLoadSzDst)
5512 {
5513 case kIemNativeGstSimdRegLdStSz_256:
5514 off = iemNativeEmitSimdLoadVecRegFromVecRegU256(pReNative, off, idxHstSimdRegDst, idxHstSimdRegSrc);
5515 break;
5516 case kIemNativeGstSimdRegLdStSz_Low128:
5517 off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, idxHstSimdRegDst, idxHstSimdRegSrc);
5518 break;
5519 case kIemNativeGstSimdRegLdStSz_High128:
5520 off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, idxHstSimdRegDst + 1, idxHstSimdRegSrc + 1);
5521 break;
5522 default:
5523 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IPE_NOT_REACHED_DEFAULT_CASE));
5524 }
5525
5526 iemNativeSimdRegSetValidLoadFlag(pReNative, idxHstSimdRegDst, enmLoadSzDst);
5527 }
5528 }
5529 else
5530 {
5531 /* Complicated stuff where the source is currently missing something, later. */
5532 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IPE_NOT_REACHED_DEFAULT_CASE));
5533 }
5534
5535 return off;
5536}
5537
5538
5539/**
5540 * Allocates a temporary host SIMD register for keeping a guest
5541 * SIMD register value.
5542 *
5543 * Since we may already have a register holding the guest register value,
5544 * code will be emitted to do the loading if that's not the case. Code may also
 5545  * be emitted if we have to free up a register to satisfy the request.
5546 *
5547 * @returns The host register number; throws VBox status code on failure, so no
5548 * need to check the return value.
5549 * @param pReNative The native recompile state.
5550 * @param poff Pointer to the variable with the code buffer
 5551  *                      position.  This will be updated if we need to move a
5552 * variable from register to stack in order to satisfy
5553 * the request.
 5554  * @param   enmGstSimdReg   The guest SIMD register that is to be updated.
 *         @param   enmLoadSz       Which part of the register needs to hold valid data:
 *                                  the low 128 bits, the high 128 bits or all 256 bits.
5555 * @param enmIntendedUse How the caller will be using the host register.
5556 * @param fNoVolatileRegs Set if no volatile register allowed, clear if any
5557 * register is okay (default). The ASSUMPTION here is
5558 * that the caller has already flushed all volatile
5559 * registers, so this is only applied if we allocate a
5560 * new register.
5561 * @sa iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
5562 */
5563DECL_HIDDEN_THROW(uint8_t)
5564iemNativeSimdRegAllocTmpForGuestSimdReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTSIMDREG enmGstSimdReg,
5565 IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz, IEMNATIVEGSTREGUSE enmIntendedUse /*= kIemNativeGstRegUse_ReadOnly*/,
5566 bool fNoVolatileRegs /*= false*/)
5567{
5568 Assert(enmGstSimdReg < kIemNativeGstSimdReg_End);
5569#if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) && 0 /** @todo r=aeichner */
5570 AssertMsg( pReNative->idxCurCall == 0
5571 || (enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
5572 ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstSimdReg))
5573 : enmIntendedUse == kIemNativeGstRegUse_ForUpdate
5574 ? IEMLIVENESS_STATE_IS_MODIFY_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstSimdReg))
5575 : IEMLIVENESS_STATE_IS_INPUT_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstSimdReg)) ),
5576 ("%s - %u\n", g_aGstSimdShadowInfo[enmGstSimdReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstSimdReg)));
5577#endif
5578#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
5579 static const char * const s_pszIntendedUse[] = { "fetch", "update", "full write", "destructive calc" };
5580#endif
5581 uint32_t const fRegMask = !fNoVolatileRegs
5582 ? IEMNATIVE_HST_SIMD_REG_MASK & ~IEMNATIVE_SIMD_REG_FIXED_MASK
5583 : IEMNATIVE_HST_SIMD_REG_MASK & ~IEMNATIVE_SIMD_REG_FIXED_MASK & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK;
5584
5585 /*
5586 * First check if the guest register value is already in a host register.
5587 */
5588 if (pReNative->Core.bmGstSimdRegShadows & RT_BIT_64(enmGstSimdReg))
5589 {
5590 uint8_t idxSimdReg = pReNative->Core.aidxGstSimdRegShadows[enmGstSimdReg];
5591 Assert(idxSimdReg < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
5592 Assert(pReNative->Core.aHstSimdRegs[idxSimdReg].fGstRegShadows & RT_BIT_64(enmGstSimdReg));
5593 Assert(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxSimdReg));
5594
5595 /* It's not supposed to be allocated... */
5596 if (!(pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxSimdReg)))
5597 {
5598 /*
 5599              * If the register will trash the guest shadow copy, try to find a
5600 * completely unused register we can use instead. If that fails,
5601 * we need to disassociate the host reg from the guest reg.
5602 */
5603 /** @todo would be nice to know if preserving the register is in any way helpful. */
 5604             /* If the purpose is calculations, try to duplicate the register value as
5605 we'll be clobbering the shadow. */
5606 if ( enmIntendedUse == kIemNativeGstRegUse_Calculation
5607 && ( ~pReNative->Core.bmHstSimdRegs
5608 & ~pReNative->Core.bmHstSimdRegsWithGstShadow
5609 & (~IEMNATIVE_SIMD_REG_FIXED_MASK & IEMNATIVE_HST_SIMD_REG_MASK)))
5610 {
5611 uint8_t const idxRegNew = iemNativeSimdRegAllocTmpEx(pReNative, poff, fRegMask);
5612
5613 *poff = iemNativeSimdRegAllocLoadVecRegFromVecRegSz(pReNative, *poff, idxRegNew, idxSimdReg, enmLoadSz);
5614
5615 Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Duplicated %s for guest %s into %s for destructive calc\n",
5616 g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName,
5617 g_apszIemNativeHstSimdRegNames[idxRegNew]));
5618 idxSimdReg = idxRegNew;
5619 }
5620 /* If the current register matches the restrictions, go ahead and allocate
5621 it for the caller. */
5622 else if (fRegMask & RT_BIT_32(idxSimdReg))
5623 {
5624 pReNative->Core.bmHstSimdRegs |= RT_BIT_32(idxSimdReg);
5625 pReNative->Core.aHstSimdRegs[idxSimdReg].enmWhat = kIemNativeWhat_Tmp;
5626 if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
5627 {
5628 if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
5629 *poff = iemNativeSimdRegAllocLoadVecRegFromVecRegSz(pReNative, *poff, idxSimdReg, idxSimdReg, enmLoadSz);
5630 else
5631 iemNativeSimdRegSetValidLoadFlag(pReNative, idxSimdReg, enmLoadSz);
5632 Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Reusing %s for guest %s %s\n",
5633 g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName, s_pszIntendedUse[enmIntendedUse]));
5634 }
5635 else
5636 {
5637 iemNativeSimdRegClearGstSimdRegShadowing(pReNative, idxSimdReg, *poff);
5638 Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Grabbing %s for guest %s - destructive calc\n",
5639 g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName));
5640 }
5641 }
5642 /* Otherwise, allocate a register that satisfies the caller and transfer
5643 the shadowing if compatible with the intended use. (This basically
 5644                means the caller wants a non-volatile register (RSP push/pop scenario).) */
5645 else
5646 {
5647 Assert(fNoVolatileRegs);
5648 uint8_t const idxRegNew = iemNativeSimdRegAllocTmpEx(pReNative, poff, fRegMask & ~RT_BIT_32(idxSimdReg),
5649 !fNoVolatileRegs
5650 && enmIntendedUse == kIemNativeGstRegUse_Calculation);
5651 *poff = iemNativeSimdRegAllocLoadVecRegFromVecRegSz(pReNative, *poff, idxRegNew, idxSimdReg, enmLoadSz);
5652 if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
5653 {
5654 iemNativeSimdRegTransferGstSimdRegShadowing(pReNative, idxSimdReg, idxRegNew, enmGstSimdReg, *poff);
 5655                     Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Transferring %s to %s for guest %s %s\n",
5656 g_apszIemNativeHstSimdRegNames[idxSimdReg], g_apszIemNativeHstSimdRegNames[idxRegNew],
5657 g_aGstSimdShadowInfo[enmGstSimdReg].pszName, s_pszIntendedUse[enmIntendedUse]));
5658 }
5659 else
5660 Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Duplicated %s for guest %s into %s for destructive calc\n",
5661 g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName,
5662 g_apszIemNativeHstSimdRegNames[idxRegNew]));
5663 idxSimdReg = idxRegNew;
5664 }
5665 }
5666 else
5667 {
5668 /*
5669 * Oops. Shadowed guest register already allocated!
5670 *
5671 * Allocate a new register, copy the value and, if updating, the
5672 * guest shadow copy assignment to the new register.
5673 */
5674 AssertMsg( enmIntendedUse != kIemNativeGstRegUse_ForUpdate
5675 && enmIntendedUse != kIemNativeGstRegUse_ForFullWrite,
5676 ("This shouldn't happen: idxSimdReg=%d enmGstSimdReg=%d enmIntendedUse=%s\n",
5677 idxSimdReg, enmGstSimdReg, s_pszIntendedUse[enmIntendedUse]));
5678
5679 /** @todo share register for readonly access. */
5680 uint8_t const idxRegNew = iemNativeSimdRegAllocTmpEx(pReNative, poff, fRegMask,
5681 enmIntendedUse == kIemNativeGstRegUse_Calculation);
5682
5683 if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
5684 *poff = iemNativeSimdRegAllocLoadVecRegFromVecRegSz(pReNative, *poff, idxRegNew, idxSimdReg, enmLoadSz);
5685 else
5686 iemNativeSimdRegSetValidLoadFlag(pReNative, idxRegNew, enmLoadSz);
5687
5688 if ( enmIntendedUse != kIemNativeGstRegUse_ForUpdate
5689 && enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
5690 Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Duplicated %s for guest %s into %s for %s\n",
5691 g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName,
5692 g_apszIemNativeHstSimdRegNames[idxRegNew], s_pszIntendedUse[enmIntendedUse]));
5693 else
5694 {
5695 iemNativeSimdRegTransferGstSimdRegShadowing(pReNative, idxSimdReg, idxRegNew, enmGstSimdReg, *poff);
5696 Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Moved %s for guest %s into %s for %s\n",
5697 g_apszIemNativeHstSimdRegNames[idxSimdReg], g_aGstSimdShadowInfo[enmGstSimdReg].pszName,
5698 g_apszIemNativeHstSimdRegNames[idxRegNew], s_pszIntendedUse[enmIntendedUse]));
5699 }
5700 idxSimdReg = idxRegNew;
5701 }
5702 Assert(RT_BIT_32(idxSimdReg) & fRegMask); /* See assumption in fNoVolatileRegs docs. */
5703
5704#ifdef VBOX_STRICT
5705 /* Strict builds: Check that the value is correct. */
5706 if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
5707 *poff = iemNativeEmitGuestSimdRegValueCheck(pReNative, *poff, idxSimdReg, enmGstSimdReg, enmLoadSz);
5708#endif
5709
5710 return idxSimdReg;
5711 }
5712
5713 /*
5714     * Allocate a new register, load it with the guest value and designate it as a copy of the guest register.
5715 */
5716 uint8_t const idxRegNew = iemNativeSimdRegAllocTmpEx(pReNative, poff, fRegMask, enmIntendedUse == kIemNativeGstRegUse_Calculation);
5717
5718 if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
5719 *poff = iemNativeEmitLoadSimdRegWithGstShadowSimdReg(pReNative, *poff, idxRegNew, enmGstSimdReg, enmLoadSz);
5720 else
5721 iemNativeSimdRegSetValidLoadFlag(pReNative, idxRegNew, enmLoadSz);
5722
5723 if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
5724 iemNativeSimdRegMarkAsGstSimdRegShadow(pReNative, idxRegNew, enmGstSimdReg, *poff);
5725
5726     Log12(("iemNativeSimdRegAllocTmpForGuestSimdReg: Allocated %s for guest %s %s\n",
5727 g_apszIemNativeHstSimdRegNames[idxRegNew], g_aGstSimdShadowInfo[enmGstSimdReg].pszName, s_pszIntendedUse[enmIntendedUse]));
5728
5729 return idxRegNew;
5730}
5731
5732#endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */
5733
5734
5735
5736/*********************************************************************************************************************************
5737* Code emitters for flushing pending guest register writes and sanity checks *
5738*********************************************************************************************************************************/
5739
5740#ifdef VBOX_STRICT
5741/**
5742 * Does internal register allocator sanity checks.
5743 */
5744DECLHIDDEN(void) iemNativeRegAssertSanity(PIEMRECOMPILERSTATE pReNative)
5745{
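    /* Editor's summary (a sketch of the invariants verified below): every host register
       flagged in bmHstRegsWithGstShadow must carry a non-empty fGstRegShadows set whose
       guest registers all point back at it via aidxGstRegShadows[], and the union of those
       sets must equal bmGstRegShadows; the second loop then checks the reverse direction. */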
5746 /*
5747 * Iterate host registers building a guest shadowing set.
5748 */
5749 uint64_t bmGstRegShadows = 0;
5750 uint32_t bmHstRegsWithGstShadow = pReNative->Core.bmHstRegsWithGstShadow;
5751 AssertMsg(!(bmHstRegsWithGstShadow & IEMNATIVE_REG_FIXED_MASK), ("%#RX32\n", bmHstRegsWithGstShadow));
5752 while (bmHstRegsWithGstShadow)
5753 {
5754 unsigned const idxHstReg = ASMBitFirstSetU32(bmHstRegsWithGstShadow) - 1;
5755 Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
5756 bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
5757
5758 uint64_t fThisGstRegShadows = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
5759 AssertMsg(fThisGstRegShadows != 0, ("idxHstReg=%d\n", idxHstReg));
5760 AssertMsg(fThisGstRegShadows < RT_BIT_64(kIemNativeGstReg_End), ("idxHstReg=%d %#RX64\n", idxHstReg, fThisGstRegShadows));
5761 bmGstRegShadows |= fThisGstRegShadows;
5762 while (fThisGstRegShadows)
5763 {
5764 unsigned const idxGstReg = ASMBitFirstSetU64(fThisGstRegShadows) - 1;
5765 fThisGstRegShadows &= ~RT_BIT_64(idxGstReg);
5766 AssertMsg(pReNative->Core.aidxGstRegShadows[idxGstReg] == idxHstReg,
5767 ("idxHstReg=%d aidxGstRegShadows[idxGstReg=%d]=%d\n",
5768 idxHstReg, idxGstReg, pReNative->Core.aidxGstRegShadows[idxGstReg]));
5769 }
5770 }
5771 AssertMsg(bmGstRegShadows == pReNative->Core.bmGstRegShadows,
5772 ("%RX64 vs %RX64; diff %RX64\n", bmGstRegShadows, pReNative->Core.bmGstRegShadows,
5773 bmGstRegShadows ^ pReNative->Core.bmGstRegShadows));
5774
5775 /*
5776 * Now the other way around, checking the guest to host index array.
5777 */
5778 bmHstRegsWithGstShadow = 0;
5779 bmGstRegShadows = pReNative->Core.bmGstRegShadows;
5780 Assert(bmGstRegShadows < RT_BIT_64(kIemNativeGstReg_End));
5781 while (bmGstRegShadows)
5782 {
5783 unsigned const idxGstReg = ASMBitFirstSetU64(bmGstRegShadows) - 1;
5784 Assert(idxGstReg < RT_ELEMENTS(pReNative->Core.aidxGstRegShadows));
5785 bmGstRegShadows &= ~RT_BIT_64(idxGstReg);
5786
5787 uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
5788 AssertMsg(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs), ("aidxGstRegShadows[%d]=%d\n", idxGstReg, idxHstReg));
5789 AssertMsg(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg),
5790 ("idxGstReg=%d idxHstReg=%d fGstRegShadows=%RX64\n",
5791 idxGstReg, idxHstReg, pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));
5792 bmHstRegsWithGstShadow |= RT_BIT_32(idxHstReg);
5793 }
5794 AssertMsg(bmHstRegsWithGstShadow == pReNative->Core.bmHstRegsWithGstShadow,
5795 ("%RX64 vs %RX64; diff %RX64\n", bmHstRegsWithGstShadow, pReNative->Core.bmHstRegsWithGstShadow,
5796 bmHstRegsWithGstShadow ^ pReNative->Core.bmHstRegsWithGstShadow));
5797}
5798#endif /* VBOX_STRICT */
5799
5800
5801/**
5802 * Flushes any delayed guest register writes.
5803 *
5804 * This must be called prior to calling CImpl functions and any helpers that use
5805 * the guest state (like raising exceptions) and such.
5806 *
5807 * This optimization has not yet been implemented. The first target would be
5808 * RIP updates, since these are the most common ones.
5809 */
5810DECL_HIDDEN_THROW(uint32_t)
5811iemNativeRegFlushPendingWritesSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t fGstShwExcept, bool fFlushShadows)
5812{
5813#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
5814 if (!(fGstShwExcept & kIemNativeGstReg_Pc))
5815 off = iemNativeEmitPcWriteback(pReNative, off);
5816#else
5817 RT_NOREF(pReNative, fGstShwExcept);
5818#endif
5819
5820#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
5821 /** @todo r=bird: There must be a quicker way to check if anything needs
5822      *        doing and then call the SIMD function to do the flushing. */
5823 /** @todo This doesn't mix well with fGstShwExcept but we ignore this for now and just flush everything. */
5824 for (uint8_t idxGstSimdReg = 0; idxGstSimdReg < RT_ELEMENTS(g_aGstSimdShadowInfo); idxGstSimdReg++)
5825 {
5826 Assert( (pReNative->Core.bmGstSimdRegShadows & RT_BIT_64(idxGstSimdReg)
5827 || !IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_U256(pReNative, idxGstSimdReg)));
5828
5829 if (IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_U256(pReNative, idxGstSimdReg))
5830 off = iemNativeSimdRegFlushPendingWrite(pReNative, off, IEMNATIVEGSTSIMDREG_SIMD(idxGstSimdReg));
5831
5832 if ( fFlushShadows
5833 && pReNative->Core.bmGstSimdRegShadows & RT_BIT_64(idxGstSimdReg))
5834 {
5835 uint8_t const idxHstSimdReg = pReNative->Core.aidxGstSimdRegShadows[idxGstSimdReg];
5836
5837 iemNativeSimdRegClearGstSimdRegShadowing(pReNative, idxHstSimdReg, off);
5838 iemNativeSimdRegFlushGuestShadows(pReNative, RT_BIT_64(IEMNATIVEGSTSIMDREG_SIMD(idxGstSimdReg)));
5839 }
5840 }
5841#else
5842 RT_NOREF(pReNative, fGstShwExcept, fFlushShadows);
5843#endif
5844
5845 return off;
5846}
5847
5848
5849#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
5850/**
5851 * Emits code to update the guest RIP value by adding the current offset since the start of the last RIP update.
5852 */
5853DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcWritebackSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off)
5854{
5855 Assert(pReNative->Core.offPc);
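    /* Editor's note: conceptually this emits the equivalent of
           pVCpu->cpum.GstCtx.rip += pReNative->Core.offPc;
       covering all the instructions whose RIP updates were skipped, then resets the counters. */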
5856# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
5857 iemNativeDbgInfoAddNativeOffset(pReNative, off);
5858 iemNativeDbgInfoAddDelayedPcUpdate(pReNative, pReNative->Core.offPc, pReNative->Core.cInstrPcUpdateSkipped);
5859# endif
5860
5861# ifndef IEMNATIVE_REG_FIXED_PC_DBG
5862 /* Allocate a temporary PC register. */
5863 uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
5864
5865 /* Perform the addition and store the result. */
5866 off = iemNativeEmitAddGprImm(pReNative, off, idxPcReg, pReNative->Core.offPc);
5867 off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
5868
5869 /* Free but don't flush the PC register. */
5870 iemNativeRegFreeTmp(pReNative, idxPcReg);
5871# else
5872 /* Compare the shadow with the context value, they should match. */
5873 off = iemNativeEmitAddGprImm(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, pReNative->Core.offPc);
5874 off = iemNativeEmitGuestRegValueCheck(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, kIemNativeGstReg_Pc);
5875# endif
5876
5877 STAM_COUNTER_ADD(&pReNative->pVCpu->iem.s.StatNativePcUpdateDelayed, pReNative->Core.cInstrPcUpdateSkipped);
5878 pReNative->Core.offPc = 0;
5879 pReNative->Core.cInstrPcUpdateSkipped = 0;
5880
5881 return off;
5882}
5883#endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING */
5884
5885
5886/*********************************************************************************************************************************
5887* Code Emitters (larger snippets) *
5888*********************************************************************************************************************************/
5889
5890/**
5891 * Loads the guest shadow register @a enmGstReg into host reg @a idxHstReg, zero
5892 * extending to 64-bit width.
5893 *
5894 * @returns New code buffer offset on success, UINT32_MAX on failure.
5895 * @param   pReNative   The native recompile state.
5896 * @param off The current code buffer position.
5897 * @param idxHstReg The host register to load the guest register value into.
5898 * @param enmGstReg The guest register to load.
5899 *
5900 * @note This does not mark @a idxHstReg as having a shadow copy of @a enmGstReg,
5901 * that is something the caller needs to do if applicable.
5902 */
5903DECL_HIDDEN_THROW(uint32_t)
5904iemNativeEmitLoadGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg)
5905{
5906 Assert((unsigned)enmGstReg < (unsigned)kIemNativeGstReg_End);
5907 Assert(g_aGstShadowInfo[enmGstReg].cb != 0);
5908
5909 switch (g_aGstShadowInfo[enmGstReg].cb)
5910 {
5911 case sizeof(uint64_t):
5912 return iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
5913 case sizeof(uint32_t):
5914 return iemNativeEmitLoadGprFromVCpuU32(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
5915 case sizeof(uint16_t):
5916 return iemNativeEmitLoadGprFromVCpuU16(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
5917#if 0 /* not present in the table. */
5918 case sizeof(uint8_t):
5919 return iemNativeEmitLoadGprFromVCpuU8(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
5920#endif
5921 default:
5922 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IPE_NOT_REACHED_DEFAULT_CASE));
5923 }
5924}
5925
5926
5927#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
5928/**
5929 * Loads the guest shadow SIMD register @a enmGstSimdReg into host SIMD reg @a idxHstSimdReg.
5930 *
5931 * @returns New code buffer offset on success, UINT32_MAX on failure.
5932 * @param pReNative The recompiler state.
5933 * @param off The current code buffer position.
5934 * @param idxHstSimdReg The host register to load the guest register value into.
5935 * @param enmGstSimdReg The guest register to load.
5936 * @param enmLoadSz The load size of the register.
5937 *
5938 * @note This does not mark @a idxHstSimdReg as having a shadow copy of @a enmGstSimdReg,
5939 * that is something the caller needs to do if applicable.
5940 */
5941DECL_HIDDEN_THROW(uint32_t)
5942iemNativeEmitLoadSimdRegWithGstShadowSimdReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstSimdReg,
5943 IEMNATIVEGSTSIMDREG enmGstSimdReg, IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz)
5944{
5945 Assert((unsigned)enmGstSimdReg < RT_ELEMENTS(g_aGstSimdShadowInfo));
5946
5947 iemNativeSimdRegSetValidLoadFlag(pReNative, idxHstSimdReg, enmLoadSz);
5948 switch (enmLoadSz)
5949 {
5950 case kIemNativeGstSimdRegLdStSz_256:
5951 off = iemNativeEmitSimdLoadVecRegFromVCpuLowU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offXmm);
5952 return iemNativeEmitSimdLoadVecRegFromVCpuHighU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offYmm);
5953 case kIemNativeGstSimdRegLdStSz_Low128:
5954 return iemNativeEmitSimdLoadVecRegFromVCpuLowU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offXmm);
5955 case kIemNativeGstSimdRegLdStSz_High128:
5956 return iemNativeEmitSimdLoadVecRegFromVCpuHighU128(pReNative, off, idxHstSimdReg, g_aGstSimdShadowInfo[enmGstSimdReg].offYmm);
5957 default:
5958 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IPE_NOT_REACHED_DEFAULT_CASE));
5959 }
5960}
5961#endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */
5962
5963#ifdef VBOX_STRICT
5964
5965/**
5966 * Emitting code that checks that the value of @a idxReg is UINT32_MAX or less.
5967 *
5968 * @note May of course trash IEMNATIVE_REG_FIXED_TMP0.
5969 * Trashes EFLAGS on AMD64.
5970 */
5971DECL_HIDDEN_THROW(uint32_t)
5972iemNativeEmitTop32BitsClearCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxReg)
5973{
5974# ifdef RT_ARCH_AMD64
5975 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
5976
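    /* Editor's sketch of the sequence emitted below (presumably the rotate trick is used
       because AMD64 has no 'test reg, imm64' form and it avoids needing a scratch register):
            rol  reg, 32            ; bring bits 63:32 down into the low half
            test reg32, 0ffffffffh  ; ZF=1 if they were all zero
            jz   +1                 ; skip the breakpoint when clear
            int3
            rol  reg, 32            ; restore the original register value */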
5977 /* rol reg64, 32 */
5978 pbCodeBuf[off++] = X86_OP_REX_W | (idxReg < 8 ? 0 : X86_OP_REX_B);
5979 pbCodeBuf[off++] = 0xc1;
5980 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxReg & 7);
5981 pbCodeBuf[off++] = 32;
5982
5983 /* test reg32, ffffffffh */
5984 if (idxReg >= 8)
5985 pbCodeBuf[off++] = X86_OP_REX_B;
5986 pbCodeBuf[off++] = 0xf7;
5987 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxReg & 7);
5988 pbCodeBuf[off++] = 0xff;
5989 pbCodeBuf[off++] = 0xff;
5990 pbCodeBuf[off++] = 0xff;
5991 pbCodeBuf[off++] = 0xff;
5992
5993 /* je/jz +1 */
5994 pbCodeBuf[off++] = 0x74;
5995 pbCodeBuf[off++] = 0x01;
5996
5997 /* int3 */
5998 pbCodeBuf[off++] = 0xcc;
5999
6000 /* rol reg64, 32 */
6001 pbCodeBuf[off++] = X86_OP_REX_W | (idxReg < 8 ? 0 : X86_OP_REX_B);
6002 pbCodeBuf[off++] = 0xc1;
6003 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxReg & 7);
6004 pbCodeBuf[off++] = 32;
6005
6006# elif defined(RT_ARCH_ARM64)
6007 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
6008 /* lsr tmp0, reg64, #32 */
6009 pu32CodeBuf[off++] = Armv8A64MkInstrLsrImm(IEMNATIVE_REG_FIXED_TMP0, idxReg, 32);
6010 /* cbz tmp0, +1 */
6011 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, IEMNATIVE_REG_FIXED_TMP0);
6012 /* brk #0x1100 */
6013 pu32CodeBuf[off++] = Armv8A64MkInstrBrk(UINT32_C(0x1100));
6014
6015# else
6016# error "Port me!"
6017# endif
6018 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6019 return off;
6020}
6021
6022
6023/**
6024 * Emitting code that checks that the content of register @a idxReg is the same
6025 * as what's in the guest register @a enmGstReg, resulting in a breakpoint
6026 * instruction if that's not the case.
6027 *
6028 * @note May of course trash IEMNATIVE_REG_FIXED_TMP0.
6029 * Trashes EFLAGS on AMD64.
6030 */
6031DECL_HIDDEN_THROW(uint32_t)
6032iemNativeEmitGuestRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxReg, IEMNATIVEGSTREG enmGstReg)
6033{
6034# ifdef RT_ARCH_AMD64
6035 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
6036
6037 /* cmp reg, [mem] */
6038 if (g_aGstShadowInfo[enmGstReg].cb == sizeof(uint8_t))
6039 {
6040 if (idxReg >= 8)
6041 pbCodeBuf[off++] = X86_OP_REX_R;
6042 pbCodeBuf[off++] = 0x38;
6043 }
6044 else
6045 {
6046 if (g_aGstShadowInfo[enmGstReg].cb == sizeof(uint64_t))
6047 pbCodeBuf[off++] = X86_OP_REX_W | (idxReg < 8 ? 0 : X86_OP_REX_R);
6048 else
6049 {
6050 if (g_aGstShadowInfo[enmGstReg].cb == sizeof(uint16_t))
6051 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
6052 else
6053 AssertStmt(g_aGstShadowInfo[enmGstReg].cb == sizeof(uint32_t),
6054 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_6));
6055 if (idxReg >= 8)
6056 pbCodeBuf[off++] = X86_OP_REX_R;
6057 }
6058 pbCodeBuf[off++] = 0x39;
6059 }
6060 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, idxReg, g_aGstShadowInfo[enmGstReg].off);
6061
6062 /* je/jz +1 */
6063 pbCodeBuf[off++] = 0x74;
6064 pbCodeBuf[off++] = 0x01;
6065
6066 /* int3 */
6067 pbCodeBuf[off++] = 0xcc;
6068
6069 /* For values smaller than the register size, we must check that the rest
6070 of the register is all zeros. */
6071 if (g_aGstShadowInfo[enmGstReg].cb < sizeof(uint32_t))
6072 {
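        /* Editor's note: the imm32 assembled below is 0xFFFF0000 for 16-bit fields and
           0xFFFFFF00 for 8-bit ones; since 'test r/m64, imm32' sign-extends the immediate,
           bits 63:32 are covered by the same check as well. */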
6073 /* test reg64, imm32 */
6074 pbCodeBuf[off++] = X86_OP_REX_W | (idxReg < 8 ? 0 : X86_OP_REX_B);
6075 pbCodeBuf[off++] = 0xf7;
6076 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxReg & 7);
6077 pbCodeBuf[off++] = 0;
6078 pbCodeBuf[off++] = g_aGstShadowInfo[enmGstReg].cb > sizeof(uint8_t) ? 0 : 0xff;
6079 pbCodeBuf[off++] = 0xff;
6080 pbCodeBuf[off++] = 0xff;
6081
6082 /* je/jz +1 */
6083 pbCodeBuf[off++] = 0x74;
6084 pbCodeBuf[off++] = 0x01;
6085
6086 /* int3 */
6087 pbCodeBuf[off++] = 0xcc;
6088 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6089 }
6090 else
6091 {
6092 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6093 if (g_aGstShadowInfo[enmGstReg].cb == sizeof(uint32_t))
6094 iemNativeEmitTop32BitsClearCheck(pReNative, off, idxReg);
6095 }
6096
6097# elif defined(RT_ARCH_ARM64)
6098 /* mov TMP0, [gstreg] */
6099 off = iemNativeEmitLoadGprWithGstShadowReg(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, enmGstReg);
6100
6101 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
6102 /* sub tmp0, tmp0, idxReg */
6103 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, IEMNATIVE_REG_FIXED_TMP0, IEMNATIVE_REG_FIXED_TMP0, idxReg);
6104 /* cbz tmp0, +1 */
6105 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, IEMNATIVE_REG_FIXED_TMP0);
6106 /* brk #0x1000+enmGstReg */
6107 pu32CodeBuf[off++] = Armv8A64MkInstrBrk((uint32_t)enmGstReg | UINT32_C(0x1000));
6108 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6109
6110# else
6111# error "Port me!"
6112# endif
6113 return off;
6114}
6115
6116
6117# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
6118/**
6119 * Emitting code that checks that the content of SIMD register @a idxSimdReg is the same
6120 * as what's in the guest register @a enmGstSimdReg, resulting in a breakpoint
6121 * instruction if that's not the case.
6122 *
6123 * @note May of course trash IEMNATIVE_SIMD_REG_FIXED_TMP0 and IEMNATIVE_REG_FIXED_TMP0.
6124 * Trashes EFLAGS on AMD64.
6125 */
6126DECL_HIDDEN_THROW(uint32_t)
6127iemNativeEmitGuestSimdRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxSimdReg,
6128 IEMNATIVEGSTSIMDREG enmGstSimdReg, IEMNATIVEGSTSIMDREGLDSTSZ enmLoadSz)
6129{
6130     /* We can't check the value against what's in CPUMCTX if the register is already marked as dirty, so skip the check. */
6131 if ( ( enmLoadSz == kIemNativeGstSimdRegLdStSz_256
6132 && ( IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_LO_U128(pReNative, enmGstSimdReg)
6133 || IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_HI_U128(pReNative, enmGstSimdReg)))
6134 || ( enmLoadSz == kIemNativeGstSimdRegLdStSz_Low128
6135 && IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_LO_U128(pReNative, enmGstSimdReg))
6136 || ( enmLoadSz == kIemNativeGstSimdRegLdStSz_High128
6137 && IEMNATIVE_SIMD_REG_STATE_IS_DIRTY_HI_U128(pReNative, enmGstSimdReg)))
6138 return off;
6139
6140# ifdef RT_ARCH_AMD64
6141 Assert(enmLoadSz == kIemNativeGstSimdRegLdStSz_Low128); /** @todo 256-bit variant. */
6142
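    /* Editor's sketch of the check emitted below: copy the host SIMD register into the fixed
       temporary, PCMPEQQ it against the CPUMCTX copy (each matching 64-bit lane becomes all
       ones), then PEXTRQ each lane into the fixed GPR and compare it with -1 (the 0x83 /7
       encoding sign-extends the 0xFF imm8); an int3 is hit if either lane differs. */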
6143 /* movdqa vectmp0, idxSimdReg */
6144 off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, IEMNATIVE_SIMD_REG_FIXED_TMP0, idxSimdReg);
6145
6146 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 44);
6147
6148 /* pcmpeqq vectmp0, [gstreg] (ASSUMES SSE4.1) */
6149 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
6150 if (IEMNATIVE_SIMD_REG_FIXED_TMP0 >= 8)
6151 pbCodeBuf[off++] = X86_OP_REX_R;
6152 pbCodeBuf[off++] = 0x0f;
6153 pbCodeBuf[off++] = 0x38;
6154 pbCodeBuf[off++] = 0x29;
6155 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, IEMNATIVE_SIMD_REG_FIXED_TMP0, g_aGstSimdShadowInfo[enmGstSimdReg].offXmm);
6156
6157 /* pextrq tmp0, vectmp0, #0 (ASSUMES SSE4.1). */
6158 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
6159 pbCodeBuf[off++] = X86_OP_REX_W
6160 | (IEMNATIVE_SIMD_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_R)
6161 | (IEMNATIVE_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_B);
6162 pbCodeBuf[off++] = 0x0f;
6163 pbCodeBuf[off++] = 0x3a;
6164 pbCodeBuf[off++] = 0x16;
6165 pbCodeBuf[off++] = 0xeb;
6166 pbCodeBuf[off++] = 0x00;
6167
6168 /* cmp tmp0, 0xffffffffffffffff. */
6169 pbCodeBuf[off++] = X86_OP_REX_W | (IEMNATIVE_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_B);
6170 pbCodeBuf[off++] = 0x83;
6171 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, IEMNATIVE_REG_FIXED_TMP0 & 7);
6172 pbCodeBuf[off++] = 0xff;
6173
6174 /* je/jz +1 */
6175 pbCodeBuf[off++] = 0x74;
6176 pbCodeBuf[off++] = 0x01;
6177
6178 /* int3 */
6179 pbCodeBuf[off++] = 0xcc;
6180
6181 /* pextrq tmp0, vectmp0, #1 (ASSUMES SSE4.1). */
6182 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
6183 pbCodeBuf[off++] = X86_OP_REX_W
6184 | (IEMNATIVE_SIMD_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_R)
6185 | (IEMNATIVE_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_B);
6186 pbCodeBuf[off++] = 0x0f;
6187 pbCodeBuf[off++] = 0x3a;
6188 pbCodeBuf[off++] = 0x16;
6189 pbCodeBuf[off++] = 0xeb;
6190 pbCodeBuf[off++] = 0x01;
6191
6192 /* cmp tmp0, 0xffffffffffffffff. */
6193 pbCodeBuf[off++] = X86_OP_REX_W | (IEMNATIVE_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_B);
6194 pbCodeBuf[off++] = 0x83;
6195 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, IEMNATIVE_REG_FIXED_TMP0 & 7);
6196 pbCodeBuf[off++] = 0xff;
6197
6198 /* je/jz +1 */
6199 pbCodeBuf[off++] = 0x74;
6200 pbCodeBuf[off++] = 0x01;
6201
6202 /* int3 */
6203 pbCodeBuf[off++] = 0xcc;
6204
6205# elif defined(RT_ARCH_ARM64)
6206 /* mov vectmp0, [gstreg] */
6207 off = iemNativeEmitLoadSimdRegWithGstShadowSimdReg(pReNative, off, IEMNATIVE_SIMD_REG_FIXED_TMP0, enmGstSimdReg, enmLoadSz);
6208
6209 if (enmLoadSz == kIemNativeGstSimdRegLdStSz_Low128 || enmLoadSz == kIemNativeGstSimdRegLdStSz_256)
6210 {
6211 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
6212 /* eor vectmp0, vectmp0, idxSimdReg */
6213 pu32CodeBuf[off++] = Armv8A64MkVecInstrEor(IEMNATIVE_SIMD_REG_FIXED_TMP0, IEMNATIVE_SIMD_REG_FIXED_TMP0, idxSimdReg);
6214 /* cnt vectmp0, vectmp0, #0*/
6215 pu32CodeBuf[off++] = Armv8A64MkVecInstrCnt(IEMNATIVE_SIMD_REG_FIXED_TMP0, IEMNATIVE_SIMD_REG_FIXED_TMP0);
6216 /* umov tmp0, vectmp0.D[0] */
6217 pu32CodeBuf[off++] = Armv8A64MkVecInstrUmov(IEMNATIVE_REG_FIXED_TMP0, IEMNATIVE_SIMD_REG_FIXED_TMP0,
6218 0 /*idxElem*/, kArmv8InstrUmovInsSz_U64);
6219 /* cbz tmp0, +1 */
6220 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, IEMNATIVE_REG_FIXED_TMP0);
6221 /* brk #0x1000+enmGstReg */
6222 pu32CodeBuf[off++] = Armv8A64MkInstrBrk((uint32_t)enmGstSimdReg | UINT32_C(0x1000));
6223 }
6224
6225 if (enmLoadSz == kIemNativeGstSimdRegLdStSz_High128 || enmLoadSz == kIemNativeGstSimdRegLdStSz_256)
6226 {
6227 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
6228 /* eor vectmp0 + 1, vectmp0 + 1, idxSimdReg */
6229 pu32CodeBuf[off++] = Armv8A64MkVecInstrEor(IEMNATIVE_SIMD_REG_FIXED_TMP0 + 1, IEMNATIVE_SIMD_REG_FIXED_TMP0 + 1, idxSimdReg + 1);
6230 /* cnt vectmp0 + 1, vectmp0 + 1, #0*/
6231 pu32CodeBuf[off++] = Armv8A64MkVecInstrCnt(IEMNATIVE_SIMD_REG_FIXED_TMP0 + 1, IEMNATIVE_SIMD_REG_FIXED_TMP0 + 1);
6232 /* umov tmp0, (vectmp0 + 1).D[0] */
6233 pu32CodeBuf[off++] = Armv8A64MkVecInstrUmov(IEMNATIVE_REG_FIXED_TMP0, IEMNATIVE_SIMD_REG_FIXED_TMP0 + 1,
6234 0 /*idxElem*/, kArmv8InstrUmovInsSz_U64);
6235 /* cbz tmp0, +1 */
6236 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, IEMNATIVE_REG_FIXED_TMP0);
6237 /* brk #0x1000+enmGstReg */
6238 pu32CodeBuf[off++] = Armv8A64MkInstrBrk((uint32_t)enmGstSimdReg | UINT32_C(0x1000));
6239 }
6240
6241# else
6242# error "Port me!"
6243# endif
6244
6245 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6246 return off;
6247}
6248# endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */
6249
6250
6251/**
6252 * Emitting code that checks that IEMCPU::fExec matches @a fExec for all
6253 * important bits.
6254 *
6255 * @note May of course trash IEMNATIVE_REG_FIXED_TMP0.
6256 * Trashes EFLAGS on AMD64.
6257 */
6258DECL_HIDDEN_THROW(uint32_t)
6259iemNativeEmitExecFlagsCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fExec)
6260{
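    /* Editor's note: both the live IEMCPU::fExec value and the @a fExec the TB was compiled
       for are reduced with IEMTB_F_KEY_MASK before the compare, so only the mode bits that
       select a translation block are verified; a mismatch hits the int3/brk emitted below. */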
6261 uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off);
6262 off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.fExec));
6263 off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegTmp, IEMTB_F_IEM_F_MASK & IEMTB_F_KEY_MASK);
6264 off = iemNativeEmitCmpGpr32WithImm(pReNative, off, idxRegTmp, fExec & IEMTB_F_KEY_MASK);
6265
6266#ifdef RT_ARCH_AMD64
6267 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
6268
6269 /* je/jz +1 */
6270 pbCodeBuf[off++] = 0x74;
6271 pbCodeBuf[off++] = 0x01;
6272
6273 /* int3 */
6274 pbCodeBuf[off++] = 0xcc;
6275
6276# elif defined(RT_ARCH_ARM64)
6277 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
6278
6279 /* b.eq +1 */
6280 pu32CodeBuf[off++] = Armv8A64MkInstrBCond(kArmv8InstrCond_Eq, 2);
6281 /* brk #0x2000 */
6282 pu32CodeBuf[off++] = Armv8A64MkInstrBrk(UINT32_C(0x2000));
6283
6284# else
6285# error "Port me!"
6286# endif
6287 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6288
6289 iemNativeRegFreeTmp(pReNative, idxRegTmp);
6290 return off;
6291}
6292
6293#endif /* VBOX_STRICT */
6294
6295
6296#ifdef IEMNATIVE_STRICT_EFLAGS_SKIPPING
6297/**
6298 * Worker for IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK.
6299 */
6300DECL_HIDDEN_THROW(uint32_t)
6301iemNativeEmitEFlagsSkippingCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fEflNeeded)
6302{
6303 uint32_t const offVCpu = RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags);
6304
6305 fEflNeeded &= X86_EFL_STATUS_BITS;
6306 if (fEflNeeded)
6307 {
6308# ifdef RT_ARCH_AMD64
6309 /* test dword [pVCpu + offVCpu], imm32 */
6310 PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
6311 if (fEflNeeded <= 0xff)
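        /* Editor's note: the byte form (0xf6 /0 with imm8) is picked when the needed status
           flags all fit in the low 8 bits, saving three immediate bytes over the dword
           form (0xf7 /0 with imm32). */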
6312 {
6313 pCodeBuf[off++] = 0xf6;
6314 off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 0, offVCpu);
6315 pCodeBuf[off++] = RT_BYTE1(fEflNeeded);
6316 }
6317 else
6318 {
6319 pCodeBuf[off++] = 0xf7;
6320 off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 0, offVCpu);
6321 pCodeBuf[off++] = RT_BYTE1(fEflNeeded);
6322 pCodeBuf[off++] = RT_BYTE2(fEflNeeded);
6323 pCodeBuf[off++] = RT_BYTE3(fEflNeeded);
6324 pCodeBuf[off++] = RT_BYTE4(fEflNeeded);
6325 }
6326 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6327
6328# else
6329 uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off);
6330 off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, idxRegTmp, offVCpu);
6331 off = iemNativeEmitTestAnyBitsInGpr(pReNative, off, idxRegTmp, fEflNeeded);
6332# ifdef RT_ARCH_ARM64
6333 off = iemNativeEmitJzToFixed(pReNative, off, off + 2);
6334 off = iemNativeEmitBrk(pReNative, off, 0x7777);
6335# else
6336# error "Port me!"
6337# endif
6338 iemNativeRegFreeTmp(pReNative, idxRegTmp);
6339# endif
6340 }
6341 return off;
6342}
6343#endif /* IEMNATIVE_STRICT_EFLAGS_SKIPPING */
6344
6345
6346/**
6347 * Emits code for checking the return code of a call and rcPassUp, returning
6348 * from the code if either is non-zero.
6349 */
6350DECL_HIDDEN_THROW(uint32_t)
6351iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr)
6352{
6353#ifdef RT_ARCH_AMD64
6354 /*
6355 * AMD64: eax = call status code.
6356 */
6357
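    /* Editor's note: the call status (eax) and iem.s.rcPassUp are OR'ed together so that a
       single conditional branch covers both conditions; the combined value is only zero when
       the status is VINF_SUCCESS and no rcPassUp is pending. */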
6358 /* edx = rcPassUp */
6359 off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, X86_GREG_xDX, RT_UOFFSETOF(VMCPUCC, iem.s.rcPassUp));
6360# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
6361 off = iemNativeEmitLoadGpr8Imm(pReNative, off, X86_GREG_xCX, idxInstr);
6362# endif
6363
6364 /* edx = eax | rcPassUp */
6365 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
6366 pbCodeBuf[off++] = 0x0b; /* or edx, eax */
6367 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xDX, X86_GREG_xAX);
6368 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6369
6370 /* Jump to non-zero status return path. */
6371 off = iemNativeEmitJnzToNewLabel(pReNative, off, kIemNativeLabelType_NonZeroRetOrPassUp);
6372
6373 /* done. */
6374
6375#elif RT_ARCH_ARM64
6376 /*
6377 * ARM64: w0 = call status code.
6378 */
6379# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
6380 off = iemNativeEmitLoadGprImm64(pReNative, off, ARMV8_A64_REG_X2, idxInstr);
6381# endif
6382 off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, ARMV8_A64_REG_X3, RT_UOFFSETOF(VMCPUCC, iem.s.rcPassUp));
6383
6384 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
6385
6386 pu32CodeBuf[off++] = Armv8A64MkInstrOrr(ARMV8_A64_REG_X4, ARMV8_A64_REG_X3, ARMV8_A64_REG_X0, false /*f64Bit*/);
6387
6388 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
6389 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5);
6390 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(true /*fJmpIfNotZero*/, 0, ARMV8_A64_REG_X4, false /*f64Bit*/);
6391
6392#else
6393# error "port me"
6394#endif
6395 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6396 RT_NOREF_PV(idxInstr);
6397 return off;
6398}
6399
6400
6401/**
6402 * Emits code to check if the content of @a idxAddrReg is a canonical address,
6403 * raising a \#GP(0) if it isn't.
6404 *
6405 * @returns New code buffer offset, UINT32_MAX on failure.
6406 * @param pReNative The native recompile state.
6407 * @param off The code buffer offset.
6408 * @param idxAddrReg The host register with the address to check.
6409 * @param idxInstr The current instruction.
6410 */
6411DECL_HIDDEN_THROW(uint32_t)
6412iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxAddrReg, uint8_t idxInstr)
6413{
6414 /*
6415 * Make sure we don't have any outstanding guest register writes as we may
6416     * raise a #GP(0) and all guest registers must be up to date in CPUMCTX.
6417 */
6418 off = iemNativeRegFlushPendingWrites(pReNative, off);
6419
6420#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
6421 off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
6422#else
6423 RT_NOREF(idxInstr);
6424#endif
6425
6426#ifdef RT_ARCH_AMD64
6427 /*
6428 * if ((((uint32_t)(a_u64Addr >> 32) + UINT32_C(0x8000)) >> 16) != 0)
6429 * return raisexcpt();
6430     * ---- this variant avoids loading a 64-bit immediate, but is an instruction longer.
6431 */
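    /* Editor's worked example: for the non-canonical address 0x0000800000000000 the high
       dword is 0x00008000; adding 0x8000 gives 0x00010000 and shifting right by 16 leaves 1,
       so the jnz takes the RaiseGp0 path.  Canonical addresses have a high dword in
       0x00000000..0x00007FFF or 0xFFFF8000..0xFFFFFFFF and end up with zero instead. */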
6432 uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
6433
6434 off = iemNativeEmitLoadGprFromGpr(pReNative, off, iTmpReg, idxAddrReg);
6435 off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 32);
6436 off = iemNativeEmitAddGpr32Imm(pReNative, off, iTmpReg, (int32_t)0x8000);
6437 off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 16);
6438 off = iemNativeEmitJnzToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0);
6439
6440 iemNativeRegFreeTmp(pReNative, iTmpReg);
6441
6442#elif defined(RT_ARCH_ARM64)
6443 /*
6444 * if ((((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000)) >> 48) != 0)
6445 * return raisexcpt();
6446 * ----
6447 * mov x1, 0x800000000000
6448 * add x1, x0, x1
6449 * cmp xzr, x1, lsr 48
6450 * b.ne .Lraisexcpt
6451 */
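    /* Editor's note: same idea done in 64-bit arithmetic - adding 2^47 maps canonical
       addresses to values with bits 63:48 clear, so the shifted compare against XZR only
       succeeds (b.ne not taken) for canonical input. */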
6452 uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
6453
6454 off = iemNativeEmitLoadGprImm64(pReNative, off, iTmpReg, UINT64_C(0x800000000000));
6455 off = iemNativeEmitAddTwoGprs(pReNative, off, iTmpReg, idxAddrReg);
6456 off = iemNativeEmitCmpArm64(pReNative, off, ARMV8_A64_REG_XZR, iTmpReg, true /*f64Bit*/, 48 /*cShift*/, kArmv8A64InstrShift_Lsr);
6457 off = iemNativeEmitJnzToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0);
6458
6459 iemNativeRegFreeTmp(pReNative, iTmpReg);
6460
6461#else
6462# error "Port me"
6463#endif
6464 return off;
6465}
6466
6467
6468/**
6469 * Emits code to check that the content of @a idxAddrReg is within the limit
6470 * of CS, raising a \#GP(0) if it isn't.
6471 *
6472 * @returns New code buffer offset; throws VBox status code on error.
6473 * @param pReNative The native recompile state.
6474 * @param off The code buffer offset.
6475 * @param idxAddrReg The host register (32-bit) with the address to
6476 * check.
6477 * @param idxInstr The current instruction.
6478 */
6479DECL_HIDDEN_THROW(uint32_t)
6480iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off,
6481 uint8_t idxAddrReg, uint8_t idxInstr)
6482{
6483 /*
6484 * Make sure we don't have any outstanding guest register writes as we may
6485     * raise a #GP(0) and all guest registers must be up to date in CPUMCTX.
6486 */
6487 off = iemNativeRegFlushPendingWrites(pReNative, off);
6488
6489#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
6490 off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
6491#else
6492 RT_NOREF(idxInstr);
6493#endif
6494
6495 uint8_t const idxRegCsLim = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
6496 (IEMNATIVEGSTREG)(kIemNativeGstReg_SegLimitFirst + X86_SREG_CS),
6497 kIemNativeGstRegUse_ReadOnly);
6498
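    /* Editor's note: the 'ja' (unsigned above) branch means an address equal to the CS limit
       is still accepted, presumably matching the inclusive x86 segment limit semantics. */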
6499 off = iemNativeEmitCmpGpr32WithGpr(pReNative, off, idxAddrReg, idxRegCsLim);
6500 off = iemNativeEmitJaToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0);
6501
6502 iemNativeRegFreeTmp(pReNative, idxRegCsLim);
6503 return off;
6504}
6505
6506
6507/**
6508 * Emits a call to a CImpl function or something similar.
6509 */
6510DECL_HIDDEN_THROW(uint32_t)
6511iemNativeEmitCImplCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr, uint64_t fGstShwFlush, uintptr_t pfnCImpl,
6512 uint8_t cbInstr, uint8_t cAddParams, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
6513{
6514 /* Writeback everything. */
6515 off = iemNativeRegFlushPendingWrites(pReNative, off);
6516
6517 /*
6518     * Flush stuff. PC and EFlags are implicitly flushed, the latter because we
6519 * don't do with/without flags variants of defer-to-cimpl stuff at the moment.
6520 */
6521 fGstShwFlush = iemNativeCImplFlagsToGuestShadowFlushMask(pReNative->fCImpl,
6522 fGstShwFlush
6523 | RT_BIT_64(kIemNativeGstReg_Pc)
6524 | RT_BIT_64(kIemNativeGstReg_EFlags));
6525 iemNativeRegFlushGuestShadows(pReNative, fGstShwFlush);
6526
6527 off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 4);
6528
6529 /*
6530 * Load the parameters.
6531 */
6532#if defined(RT_OS_WINDOWS) && defined(VBOXSTRICTRC_STRICT_ENABLED)
6533    /* Special handling for the hidden VBOXSTRICTRC return pointer. */
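    /* Editor's note: with VBOXSTRICTRC_STRICT_ENABLED the return type is a class that the
       Windows x64 ABI hands back through a hidden buffer whose address goes in the first
       parameter register; the visible arguments therefore shift one register to the right
       and the buffer lives in the shadow argument area (see the lea into rcx below). */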
6534 off = iemNativeEmitLoadGprFromGpr( pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
6535 off = iemNativeEmitLoadGprImm64( pReNative, off, IEMNATIVE_CALL_ARG2_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
6536 if (cAddParams > 0)
6537 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam0);
6538 if (cAddParams > 1)
6539 off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam1);
6540 if (cAddParams > 2)
6541 off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG1, uParam2);
6542 off = iemNativeEmitLeaGprByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
6543
6544#else
6545 AssertCompile(IEMNATIVE_CALL_ARG_GREG_COUNT >= 4);
6546 off = iemNativeEmitLoadGprFromGpr( pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
6547 off = iemNativeEmitLoadGprImm64( pReNative, off, IEMNATIVE_CALL_ARG1_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
6548 if (cAddParams > 0)
6549 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, uParam0);
6550 if (cAddParams > 1)
6551 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam1);
6552 if (cAddParams > 2)
6553# if IEMNATIVE_CALL_ARG_GREG_COUNT >= 5
6554 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG4_GREG, uParam2);
6555# else
6556 off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam2);
6557# endif
6558#endif
6559
6560 /*
6561 * Make the call.
6562 */
6563 off = iemNativeEmitCallImm(pReNative, off, pfnCImpl);
6564
6565#if defined(RT_ARCH_AMD64) && defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS)
6566 off = iemNativeEmitLoadGprByBpU32(pReNative, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
6567#endif
6568
6569 /*
6570 * Check the status code.
6571 */
6572 return iemNativeEmitCheckCallRetAndPassUp(pReNative, off, idxInstr);
6573}
6574
6575
6576/**
6577 * Emits a call to a threaded worker function.
6578 */
6579DECL_HIDDEN_THROW(uint32_t)
6580iemNativeEmitThreadedCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
6581{
6582 IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, X86_EFL_STATUS_BITS);
6583
6584 /* We don't know what the threaded function is doing so we must flush all pending writes. */
6585 off = iemNativeRegFlushPendingWrites(pReNative, off);
6586
6587 iemNativeRegFlushGuestShadows(pReNative, UINT64_MAX); /** @todo optimize this */
6588 off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 4);
6589
6590#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
6591 /* The threaded function may throw / long jmp, so set current instruction
6592 number if we're counting. */
6593 off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, pCallEntry->idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
6594#endif
6595
6596 uint8_t const cParams = g_acIemThreadedFunctionUsedArgs[pCallEntry->enmFunction];
6597
6598#ifdef RT_ARCH_AMD64
6599 /* Load the parameters and emit the call. */
6600# ifdef RT_OS_WINDOWS
6601# ifndef VBOXSTRICTRC_STRICT_ENABLED
6602 off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xCX, IEMNATIVE_REG_FIXED_PVMCPU);
6603 if (cParams > 0)
6604 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xDX, pCallEntry->auParams[0]);
6605 if (cParams > 1)
6606 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x8, pCallEntry->auParams[1]);
6607 if (cParams > 2)
6608 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x9, pCallEntry->auParams[2]);
6609# else /* VBOXSTRICTRC: Returned via hidden parameter. Sigh. */
6610 off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, IEMNATIVE_REG_FIXED_PVMCPU);
6611 if (cParams > 0)
6612 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x8, pCallEntry->auParams[0]);
6613 if (cParams > 1)
6614 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x9, pCallEntry->auParams[1]);
6615 if (cParams > 2)
6616 {
6617 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x10, pCallEntry->auParams[2]);
6618 off = iemNativeEmitStoreGprByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, X86_GREG_x10);
6619 }
6620 off = iemNativeEmitLeaGprByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
6621# endif /* VBOXSTRICTRC_STRICT_ENABLED */
6622# else
6623 off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDI, IEMNATIVE_REG_FIXED_PVMCPU);
6624 if (cParams > 0)
6625 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xSI, pCallEntry->auParams[0]);
6626 if (cParams > 1)
6627 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xDX, pCallEntry->auParams[1]);
6628 if (cParams > 2)
6629 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xCX, pCallEntry->auParams[2]);
6630# endif
6631
6632 off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)g_apfnIemThreadedFunctions[pCallEntry->enmFunction]);
6633
6634# if defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS)
6635 off = iemNativeEmitLoadGprByBpU32(pReNative, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
6636# endif
6637
6638#elif RT_ARCH_ARM64
6639 /*
6640 * ARM64:
6641 */
6642 off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
6643 if (cParams > 0)
6644 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, pCallEntry->auParams[0]);
6645 if (cParams > 1)
6646 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, pCallEntry->auParams[1]);
6647 if (cParams > 2)
6648 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, pCallEntry->auParams[2]);
6649
6650 off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)g_apfnIemThreadedFunctions[pCallEntry->enmFunction]);
6651
6652#else
6653# error "port me"
6654#endif
6655
6656 /*
6657 * Check the status code.
6658 */
6659 off = iemNativeEmitCheckCallRetAndPassUp(pReNative, off, pCallEntry->idxInstr);
6660
6661 return off;
6662}
6663
6664#ifdef VBOX_WITH_STATISTICS
6665/**
6666 * Emits code to update the threaded call statistics.
6667 */
6668DECL_INLINE_THROW(uint32_t)
6669iemNativeEmitThreadCallStats(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
6670{
6671 /*
6672 * Update threaded function stats.
6673 */
6674 uint32_t const offVCpu = RT_UOFFSETOF_DYN(VMCPUCC, iem.s.acThreadedFuncStats[pCallEntry->enmFunction]);
6675 AssertCompile(sizeof(pReNative->pVCpu->iem.s.acThreadedFuncStats[pCallEntry->enmFunction]) == sizeof(uint32_t));
6676# if defined(RT_ARCH_ARM64)
6677 uint8_t const idxTmp1 = iemNativeRegAllocTmp(pReNative, &off);
6678 uint8_t const idxTmp2 = iemNativeRegAllocTmp(pReNative, &off);
6679 off = iemNativeEmitIncU32CounterInVCpu(pReNative, off, idxTmp1, idxTmp2, offVCpu);
6680 iemNativeRegFreeTmp(pReNative, idxTmp1);
6681 iemNativeRegFreeTmp(pReNative, idxTmp2);
6682# else
6683 off = iemNativeEmitIncU32CounterInVCpu(pReNative, off, UINT8_MAX, UINT8_MAX, offVCpu);
6684# endif
6685 return off;
6686}
6687#endif /* VBOX_WITH_STATISTICS */
6688
6689
6690/**
6691 * Emits the code at the ReturnWithFlags label (returns
6692 * VINF_IEM_REEXEC_FINISH_WITH_FLAGS).
6693 */
6694static uint32_t iemNativeEmitReturnWithFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
6695{
6696 uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_ReturnWithFlags);
6697 if (idxLabel != UINT32_MAX)
6698 {
6699 iemNativeLabelDefine(pReNative, idxLabel, off);
6700
6701 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_RET_GREG, VINF_IEM_REEXEC_FINISH_WITH_FLAGS);
6702
6703 /* jump back to the return sequence. */
6704 off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
6705 }
6706 return off;
6707}
6708
6709
6710/**
6711 * Emits the code at the ReturnBreak label (returns VINF_IEM_REEXEC_BREAK).
6712 */
6713static uint32_t iemNativeEmitReturnBreak(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
6714{
6715 uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_ReturnBreak);
6716 if (idxLabel != UINT32_MAX)
6717 {
6718 iemNativeLabelDefine(pReNative, idxLabel, off);
6719
6720 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_RET_GREG, VINF_IEM_REEXEC_BREAK);
6721
6722 /* jump back to the return sequence. */
6723 off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
6724 }
6725 return off;
6726}
6727
6728
6729/**
6730 * Emits the RC fiddling code for handling non-zero return code or rcPassUp.
6731 */
6732static uint32_t iemNativeEmitRcFiddling(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
6733{
6734 /*
6735 * Generate the rc + rcPassUp fiddling code if needed.
6736 */
6737 uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
6738 if (idxLabel != UINT32_MAX)
6739 {
6740 iemNativeLabelDefine(pReNative, idxLabel, off);
6741
6742 /* iemNativeHlpExecStatusCodeFiddling(PVMCPUCC pVCpu, int rc, uint8_t idxInstr) */
6743#ifdef RT_ARCH_AMD64
6744# ifdef RT_OS_WINDOWS
6745# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
6746 off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_x8, X86_GREG_xCX); /* cl = instruction number */
6747# endif
6748 off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xCX, IEMNATIVE_REG_FIXED_PVMCPU);
6749 off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xAX);
6750# else
6751 off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDI, IEMNATIVE_REG_FIXED_PVMCPU);
6752 off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xSI, X86_GREG_xAX);
6753# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
6754 off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xCX); /* cl = instruction number */
6755# endif
6756# endif
6757# ifndef IEMNATIVE_WITH_INSTRUCTION_COUNTING
6758 off = iemNativeEmitLoadGpr8Imm(pReNative, off, X86_GREG_xCX, 0);
6759# endif
6760
6761#else
6762 off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_CALL_RET_GREG);
6763 off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
6764 /* IEMNATIVE_CALL_ARG2_GREG is already set. */
6765#endif
6766
6767 off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpExecStatusCodeFiddling);
6768 off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
6769 }
6770 return off;
6771}
6772
6773
6774/**
6775 * Emits a standard epilog.
6776 */
6777static uint32_t iemNativeEmitEpilog(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t *pidxReturnLabel)
6778{
6779 *pidxReturnLabel = UINT32_MAX;
6780
6781 /* Flush any pending writes before returning from the last instruction (RIP updates, etc.). */
6782 off = iemNativeRegFlushPendingWrites(pReNative, off);
6783
6784 /*
6785 * Successful return, so clear the return register (eax, w0).
6786 */
6787 off = iemNativeEmitGprZero(pReNative,off, IEMNATIVE_CALL_RET_GREG);
6788
6789 /*
6790 * Define label for common return point.
6791 */
6792 uint32_t const idxReturn = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Return, off);
6793 *pidxReturnLabel = idxReturn;
6794
6795 IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, X86_EFL_STATUS_BITS);
6796
6797 /*
6798 * Restore registers and return.
6799 */
6800#ifdef RT_ARCH_AMD64
6801 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
6802
6803 /* Reposition esp at the r15 restore point. */
6804 pbCodeBuf[off++] = X86_OP_REX_W;
6805 pbCodeBuf[off++] = 0x8d; /* lea rsp, [rbp - (gcc ? 5 : 7) * 8] */
6806 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, X86_GREG_xSP, X86_GREG_xBP);
6807 pbCodeBuf[off++] = (uint8_t)IEMNATIVE_FP_OFF_LAST_PUSH;
6808
6809 /* Pop non-volatile registers and return */
6810 pbCodeBuf[off++] = X86_OP_REX_B; /* pop r15 */
6811 pbCodeBuf[off++] = 0x58 + X86_GREG_x15 - 8;
6812 pbCodeBuf[off++] = X86_OP_REX_B; /* pop r14 */
6813 pbCodeBuf[off++] = 0x58 + X86_GREG_x14 - 8;
6814 pbCodeBuf[off++] = X86_OP_REX_B; /* pop r13 */
6815 pbCodeBuf[off++] = 0x58 + X86_GREG_x13 - 8;
6816 pbCodeBuf[off++] = X86_OP_REX_B; /* pop r12 */
6817 pbCodeBuf[off++] = 0x58 + X86_GREG_x12 - 8;
6818# ifdef RT_OS_WINDOWS
6819 pbCodeBuf[off++] = 0x58 + X86_GREG_xDI; /* pop rdi */
6820 pbCodeBuf[off++] = 0x58 + X86_GREG_xSI; /* pop rsi */
6821# endif
6822 pbCodeBuf[off++] = 0x58 + X86_GREG_xBX; /* pop rbx */
6823 pbCodeBuf[off++] = 0xc9; /* leave */
6824 pbCodeBuf[off++] = 0xc3; /* ret */
6825 pbCodeBuf[off++] = 0xcc; /* int3 poison */
6826
6827#elif RT_ARCH_ARM64
6828 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
6829
6830     /* ldp x19, x20, [sp, #IEMNATIVE_FRAME_VAR_SIZE]! ; Deallocate the variable space and restore x19+x20. */
6831 AssertCompile(IEMNATIVE_FRAME_VAR_SIZE < 64*8);
6832 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_PreIndex,
6833 ARMV8_A64_REG_X19, ARMV8_A64_REG_X20, ARMV8_A64_REG_SP,
6834 IEMNATIVE_FRAME_VAR_SIZE / 8);
6835 /* Restore x21 thru x28 + BP and LR (ret address) (SP remains unchanged in the kSigned variant). */
6836 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
6837 ARMV8_A64_REG_X21, ARMV8_A64_REG_X22, ARMV8_A64_REG_SP, 2);
6838 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
6839 ARMV8_A64_REG_X23, ARMV8_A64_REG_X24, ARMV8_A64_REG_SP, 4);
6840 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
6841 ARMV8_A64_REG_X25, ARMV8_A64_REG_X26, ARMV8_A64_REG_SP, 6);
6842 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
6843 ARMV8_A64_REG_X27, ARMV8_A64_REG_X28, ARMV8_A64_REG_SP, 8);
6844 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
6845 ARMV8_A64_REG_BP, ARMV8_A64_REG_LR, ARMV8_A64_REG_SP, 10);
6846 AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);
6847
6848 /* add sp, sp, IEMNATIVE_FRAME_SAVE_REG_SIZE ; */
6849 AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE < 4096);
6850 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, ARMV8_A64_REG_SP, ARMV8_A64_REG_SP,
6851 IEMNATIVE_FRAME_SAVE_REG_SIZE);
6852
6853 /* retab / ret */
6854# ifdef RT_OS_DARWIN /** @todo See todo on pacibsp in the prolog. */
6855 if (1)
6856 pu32CodeBuf[off++] = ARMV8_A64_INSTR_RETAB;
6857 else
6858# endif
6859 pu32CodeBuf[off++] = ARMV8_A64_INSTR_RET;
6860
6861#else
6862# error "port me"
6863#endif
6864 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6865
6866 return iemNativeEmitRcFiddling(pReNative, off, idxReturn);
6867}
6868
6869
6870/**
6871 * Emits a standard prolog.
6872 */
6873static uint32_t iemNativeEmitProlog(PIEMRECOMPILERSTATE pReNative, uint32_t off)
6874{
6875#ifdef RT_ARCH_AMD64
6876 /*
6877 * Set up a regular xBP stack frame, pushing all non-volatile GPRs,
6878 * reserving 64 bytes for stack variables plus 4 non-register argument
6879     * slots.  Fixed register assignment: xBX = pVCpu;
6880 *
6881 * Since we always do the same register spilling, we can use the same
6882 * unwind description for all the code.
6883 */
6884 uint8_t *const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
6885 pbCodeBuf[off++] = 0x50 + X86_GREG_xBP; /* push rbp */
6886 pbCodeBuf[off++] = X86_OP_REX_W; /* mov rbp, rsp */
6887 pbCodeBuf[off++] = 0x8b;
6888 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xBP, X86_GREG_xSP);
6889 pbCodeBuf[off++] = 0x50 + X86_GREG_xBX; /* push rbx */
6890 AssertCompile(IEMNATIVE_REG_FIXED_PVMCPU == X86_GREG_xBX);
6891# ifdef RT_OS_WINDOWS
6892 pbCodeBuf[off++] = X86_OP_REX_W; /* mov rbx, rcx ; RBX = pVCpu */
6893 pbCodeBuf[off++] = 0x8b;
6894 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xBX, X86_GREG_xCX);
6895 pbCodeBuf[off++] = 0x50 + X86_GREG_xSI; /* push rsi */
6896 pbCodeBuf[off++] = 0x50 + X86_GREG_xDI; /* push rdi */
6897# else
6898 pbCodeBuf[off++] = X86_OP_REX_W; /* mov rbx, rdi ; RBX = pVCpu */
6899 pbCodeBuf[off++] = 0x8b;
6900 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xBX, X86_GREG_xDI);
6901# endif
6902 pbCodeBuf[off++] = X86_OP_REX_B; /* push r12 */
6903 pbCodeBuf[off++] = 0x50 + X86_GREG_x12 - 8;
6904 pbCodeBuf[off++] = X86_OP_REX_B; /* push r13 */
6905 pbCodeBuf[off++] = 0x50 + X86_GREG_x13 - 8;
6906 pbCodeBuf[off++] = X86_OP_REX_B; /* push r14 */
6907 pbCodeBuf[off++] = 0x50 + X86_GREG_x14 - 8;
6908 pbCodeBuf[off++] = X86_OP_REX_B; /* push r15 */
6909 pbCodeBuf[off++] = 0x50 + X86_GREG_x15 - 8;
6910
6911# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
6912 /* Save the frame pointer. */
6913 off = iemNativeEmitStoreGprToVCpuU64Ex(pbCodeBuf, off, X86_GREG_xBP, RT_UOFFSETOF(VMCPUCC, iem.s.pvTbFramePointerR3));
6914# endif
6915
6916 off = iemNativeEmitSubGprImm(pReNative, off, /* sub rsp, byte 28h */
6917 X86_GREG_xSP,
6918 IEMNATIVE_FRAME_ALIGN_SIZE
6919 + IEMNATIVE_FRAME_VAR_SIZE
6920 + IEMNATIVE_FRAME_STACK_ARG_COUNT * 8
6921 + IEMNATIVE_FRAME_SHADOW_ARG_COUNT * 8);
6922 AssertCompile(!(IEMNATIVE_FRAME_VAR_SIZE & 0xf));
6923 AssertCompile(!(IEMNATIVE_FRAME_STACK_ARG_COUNT & 0x1));
6924 AssertCompile(!(IEMNATIVE_FRAME_SHADOW_ARG_COUNT & 0x1));
6925
6926#elif RT_ARCH_ARM64
6927 /*
6928 * We set up a stack frame exactly like on x86, only we have to push the
6929     * return address ourselves here.  We save all non-volatile registers.
6930 */
6931 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 16);
6932
6933 # ifdef RT_OS_DARWIN /** @todo This seems to be a requirement by libunwind for JIT FDEs. Investigate further, as we have been
6934                       *        unable to figure out where the BRK following the AUTHB*+XPACB* stuff comes from in libunwind.  It's
6935                       *        definitely the dwarf stepping code, but until that is found it's very tedious to figure out whether
6936                       *        it's in any way conditional, so just emitting this instruction now and hoping for the best... */
6937 /* pacibsp */
6938 pu32CodeBuf[off++] = ARMV8_A64_INSTR_PACIBSP;
6939# endif
6940
6941 /* stp x19, x20, [sp, #-IEMNATIVE_FRAME_SAVE_REG_SIZE] ; Allocate space for saving registers and place x19+x20 at the bottom. */
6942 AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE < 64*8);
6943 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_PreIndex,
6944 ARMV8_A64_REG_X19, ARMV8_A64_REG_X20, ARMV8_A64_REG_SP,
6945 -IEMNATIVE_FRAME_SAVE_REG_SIZE / 8);
6946 /* Save x21 thru x28 (SP remains unchanged in the kSigned variant). */
6947 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
6948 ARMV8_A64_REG_X21, ARMV8_A64_REG_X22, ARMV8_A64_REG_SP, 2);
6949 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
6950 ARMV8_A64_REG_X23, ARMV8_A64_REG_X24, ARMV8_A64_REG_SP, 4);
6951 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
6952 ARMV8_A64_REG_X25, ARMV8_A64_REG_X26, ARMV8_A64_REG_SP, 6);
6953 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
6954 ARMV8_A64_REG_X27, ARMV8_A64_REG_X28, ARMV8_A64_REG_SP, 8);
6955 /* Save the BP and LR (ret address) registers at the top of the frame. */
6956 pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
6957 ARMV8_A64_REG_BP, ARMV8_A64_REG_LR, ARMV8_A64_REG_SP, 10);
6958 AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);
6959 /* add bp, sp, IEMNATIVE_FRAME_SAVE_REG_SIZE - 16 ; Set BP to point to the old BP stack address. */
6960 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, ARMV8_A64_REG_BP,
6961 ARMV8_A64_REG_SP, IEMNATIVE_FRAME_SAVE_REG_SIZE - 16);
6962
6963 /* sub sp, sp, IEMNATIVE_FRAME_VAR_SIZE ; Allocate the variable area from SP. */
6964 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_SP, ARMV8_A64_REG_SP, IEMNATIVE_FRAME_VAR_SIZE);
6965
6966 /* mov r28, r0 */
6967 off = iemNativeEmitLoadGprFromGprEx(pu32CodeBuf, off, IEMNATIVE_REG_FIXED_PVMCPU, IEMNATIVE_CALL_ARG0_GREG);
6968 /* mov r27, r1 */
6969 off = iemNativeEmitLoadGprFromGprEx(pu32CodeBuf, off, IEMNATIVE_REG_FIXED_PCPUMCTX, IEMNATIVE_CALL_ARG1_GREG);
6970
6971# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
6972 /* Save the frame pointer. */
6973 off = iemNativeEmitStoreGprToVCpuU64Ex(pu32CodeBuf, off, ARMV8_A64_REG_BP, RT_UOFFSETOF(VMCPUCC, iem.s.pvTbFramePointerR3),
6974 ARMV8_A64_REG_X2);
6975# endif
6976
6977#else
6978# error "port me"
6979#endif
6980 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6981 return off;
6982}
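
/*
 * Informal sketch of the arm64 frame constructed by the emitter above.  The
 * byte offsets are derived from the stp/add/sub sequence and assume
 * IEMNATIVE_FRAME_SAVE_REG_SIZE == 12*8 as asserted, so treat this as an
 * illustration rather than an authoritative layout definition:
 *
 *      sp (after the pre-indexed stp, before the variable area is allocated):
 *          +0x00 x19   +0x08 x20
 *          +0x10 x21   +0x18 x22
 *          +0x20 x23   +0x28 x24
 *          +0x30 x25   +0x38 x26
 *          +0x40 x27   +0x48 x28
 *          +0x50 old BP (x29)  <- the new BP points here
 *          +0x58 LR (x30, the return address)
 *      sp is then lowered by IEMNATIVE_FRAME_VAR_SIZE for the variable area.
 */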
6983
6984
6985/*********************************************************************************************************************************
6986* Emitters for IEM_MC_ARG_XXX, IEM_MC_LOCAL, IEM_MC_LOCAL_CONST, ++ *
6987*********************************************************************************************************************************/
6988
6989/**
6990 * Internal work that allocates a variable with kind set to
6991 * kIemNativeVarKind_Invalid and no current stack allocation.
6992 *
6993 * The kind will either be set by the caller or later when the variable is first
6994 * assigned a value.
6995 *
6996 * @returns Unpacked index.
6997 * @internal
6998 */
6999static uint8_t iemNativeVarAllocInt(PIEMRECOMPILERSTATE pReNative, uint8_t cbType)
7000{
7001 Assert(cbType > 0 && cbType <= 64);
7002 unsigned const idxVar = ASMBitFirstSetU32(~pReNative->Core.bmVars) - 1;
7003 AssertStmt(idxVar < RT_ELEMENTS(pReNative->Core.aVars), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_EXHAUSTED));
7004 pReNative->Core.bmVars |= RT_BIT_32(idxVar);
7005 pReNative->Core.aVars[idxVar].enmKind = kIemNativeVarKind_Invalid;
7006 pReNative->Core.aVars[idxVar].cbVar = cbType;
7007 pReNative->Core.aVars[idxVar].idxStackSlot = UINT8_MAX;
7008 pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
7009 pReNative->Core.aVars[idxVar].uArgNo = UINT8_MAX;
7010 pReNative->Core.aVars[idxVar].idxReferrerVar = UINT8_MAX;
7011 pReNative->Core.aVars[idxVar].enmGstReg = kIemNativeGstReg_End;
7012 pReNative->Core.aVars[idxVar].fRegAcquired = false;
7013 pReNative->Core.aVars[idxVar].u.uValue = 0;
7014#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
7015 pReNative->Core.aVars[idxVar].fSimdReg = false;
7016#endif
7017 return idxVar;
7018}
7019
7020
7021/**
7022 * Internal work that allocates an argument variable w/o setting enmKind.
7023 *
7024 * @returns Unpacked index.
7025 * @internal
7026 */
7027static uint8_t iemNativeArgAllocInt(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t cbType)
7028{
7029 iArgNo += iemNativeArgGetHiddenArgCount(pReNative);
7030 AssertStmt(iArgNo < RT_ELEMENTS(pReNative->Core.aidxArgVars), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_1));
7031 AssertStmt(pReNative->Core.aidxArgVars[iArgNo] == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_DUP_ARG_NO));
7032
7033 uint8_t const idxVar = iemNativeVarAllocInt(pReNative, cbType);
7034 pReNative->Core.aidxArgVars[iArgNo] = idxVar; /* (unpacked) */
7035 pReNative->Core.aVars[idxVar].uArgNo = iArgNo;
7036 return idxVar;
7037}
7038
7039
7040/**
7041 * Gets the stack slot for a stack variable, allocating one if necessary.
7042 *
7043 * Calling this function implies that the stack slot will contain a valid
7044 * variable value. The caller deals with any register currently assigned to the
7045 * variable, typically by spilling it into the stack slot.
7046 *
7047 * @returns The stack slot number.
7048 * @param pReNative The recompiler state.
7049 * @param idxVar The variable.
7050 * @throws VERR_IEM_VAR_OUT_OF_STACK_SLOTS
7051 */
7052DECL_HIDDEN_THROW(uint8_t) iemNativeVarGetStackSlot(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
7053{
7054 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7055 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7056 Assert(pVar->enmKind == kIemNativeVarKind_Stack);
7057
7058 /* Already got a slot? */
7059 uint8_t const idxStackSlot = pVar->idxStackSlot;
7060 if (idxStackSlot != UINT8_MAX)
7061 {
7062 Assert(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS);
7063 return idxStackSlot;
7064 }
7065
7066 /*
7067 * A single slot is easy to allocate.
7068 * Allocate them from the top end, closest to BP, to reduce the displacement.
7069 */
7070 if (pVar->cbVar <= sizeof(uint64_t))
7071 {
7072 unsigned const iSlot = ASMBitLastSetU32(~pReNative->Core.bmStack) - 1;
7073 AssertStmt(iSlot < IEMNATIVE_FRAME_VAR_SLOTS, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_OUT_OF_STACK_SLOTS));
7074 pReNative->Core.bmStack |= RT_BIT_32(iSlot);
7075 pVar->idxStackSlot = (uint8_t)iSlot;
7076 Log11(("iemNativeVarGetStackSlot: idxVar=%#x iSlot=%#x\n", idxVar, iSlot));
7077 return (uint8_t)iSlot;
7078 }
7079
7080 /*
7081 * We need more than one stack slot.
7082 *
7083 * cbVar -> fBitAlignMask: 16 -> 1; 32 -> 3; 64 -> 7;
7084 */
7085 AssertCompile(RT_IS_POWER_OF_TWO(IEMNATIVE_FRAME_VAR_SLOTS)); /* If not we have to add an overflow check. */
7086 Assert(pVar->cbVar <= 64);
7087 uint32_t const fBitAlignMask = RT_BIT_32(ASMBitLastSetU32(pVar->cbVar) - 4) - 1;
7088 uint32_t fBitAllocMask = RT_BIT_32((pVar->cbVar + 7) >> 3) - 1;
7089 uint32_t bmStack = pReNative->Core.bmStack;
7090 while (bmStack != UINT32_MAX)
7091 {
7092 unsigned iSlot = ASMBitLastSetU32(~bmStack);
7093 AssertStmt(iSlot, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_OUT_OF_STACK_SLOTS));
7094 iSlot = (iSlot - 1) & ~fBitAlignMask;
7095 if ((bmStack & ~(fBitAllocMask << iSlot)) == bmStack)
7096 {
7097 pReNative->Core.bmStack |= (fBitAllocMask << iSlot);
7098 pVar->idxStackSlot = (uint8_t)iSlot;
7099 Log11(("iemNativeVarGetStackSlot: idxVar=%#x iSlot=%#x/%#x (cbVar=%#x)\n",
7100 idxVar, iSlot, fBitAllocMask, pVar->cbVar));
7101 return (uint8_t)iSlot;
7102 }
7103
7104 bmStack |= (fBitAllocMask << iSlot);
7105 }
7106 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_OUT_OF_STACK_SLOTS));
7107}
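
/*
 * Worked example (informal) of the multi-slot path above, assuming 8-byte
 * stack slots and a 256-bit variable (cbVar == 32):
 *      fBitAlignMask = RT_BIT_32(ASMBitLastSetU32(32) - 4) - 1 = RT_BIT_32(2) - 1 = 3
 *      fBitAllocMask = RT_BIT_32((32 + 7) >> 3)         - 1 = RT_BIT_32(4) - 1 = 0xf
 * So four consecutive slots are claimed and the candidate slot index is
 * rounded down to a multiple of four (via '& ~fBitAlignMask') before the
 * bitmap is tested for a free run.
 */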
7108
7109
7110/**
7111 * Changes the variable to a stack variable.
7112 *
7113 * Currently this is only possible to do the first time the variable is used;
7114 * switching later can be implemented but isn't done.
7115 *
7116 * @param pReNative The recompiler state.
7117 * @param idxVar The variable.
7118 * @throws VERR_IEM_VAR_IPE_2
7119 */
7120DECL_HIDDEN_THROW(void) iemNativeVarSetKindToStack(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
7121{
7122 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7123 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7124 if (pVar->enmKind != kIemNativeVarKind_Stack)
7125 {
7126 /* We could in theory transition from immediate to stack as well, but it
7127 would involve the caller doing work storing the value on the stack. So,
7128 till that's required we only allow transition from invalid. */
7129 AssertStmt(pVar->enmKind == kIemNativeVarKind_Invalid, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
7130 AssertStmt(pVar->idxReg == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
7131 pVar->enmKind = kIemNativeVarKind_Stack;
7132
7133 /* Note! We don't allocate a stack slot here, that's only done when a
7134 slot is actually needed to hold a variable value. */
7135 }
7136}
7137
7138
7139/**
7140 * Sets the variable to a constant value.
7141 *
7142 * This does not require stack storage as we know the value and can always
7143 * reload it, unless of course it's referenced.
7144 *
7145 * @param pReNative The recompiler state.
7146 * @param idxVar The variable.
7147 * @param uValue The immediate value.
7148 * @throws VERR_IEM_VAR_OUT_OF_STACK_SLOTS, VERR_IEM_VAR_IPE_2
7149 */
7150DECL_HIDDEN_THROW(void) iemNativeVarSetKindToConst(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint64_t uValue)
7151{
7152 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7153 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7154 if (pVar->enmKind != kIemNativeVarKind_Immediate)
7155 {
7156 /* Only simple transitions for now. */
7157 AssertStmt(pVar->enmKind == kIemNativeVarKind_Invalid, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
7158 pVar->enmKind = kIemNativeVarKind_Immediate;
7159 }
7160 AssertStmt(pVar->idxReg == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
7161
7162 pVar->u.uValue = uValue;
7163 AssertMsg( pVar->cbVar >= sizeof(uint64_t)
7164 || pVar->u.uValue < RT_BIT_64(pVar->cbVar * 8),
7165 ("idxVar=%d cbVar=%u uValue=%#RX64\n", idxVar, pVar->cbVar, uValue));
7166}
7167
7168
7169/**
7170 * Sets the variable to a reference (pointer) to @a idxOtherVar.
7171 *
7172 * This does not require stack storage as we know the value and can always
7173 * reload it. Loading is postponed till needed.
7174 *
7175 * @param pReNative The recompiler state.
7176 * @param idxVar The variable. Unpacked.
7177 * @param idxOtherVar The variable to take the (stack) address of. Unpacked.
7178 *
7179 * @throws VERR_IEM_VAR_OUT_OF_STACK_SLOTS, VERR_IEM_VAR_IPE_2
7180 * @internal
7181 */
7182static void iemNativeVarSetKindToLocalRef(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint8_t idxOtherVar)
7183{
7184 Assert(idxVar < RT_ELEMENTS(pReNative->Core.aVars) && (pReNative->Core.bmVars & RT_BIT_32(idxVar)));
7185 Assert(idxOtherVar < RT_ELEMENTS(pReNative->Core.aVars) && (pReNative->Core.bmVars & RT_BIT_32(idxOtherVar)));
7186
7187 if (pReNative->Core.aVars[idxVar].enmKind != kIemNativeVarKind_VarRef)
7188 {
7189 /* Only simple transitions for now. */
7190 AssertStmt(pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Invalid,
7191 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
7192 pReNative->Core.aVars[idxVar].enmKind = kIemNativeVarKind_VarRef;
7193 }
7194 AssertStmt(pReNative->Core.aVars[idxVar].idxReg == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
7195
7196 pReNative->Core.aVars[idxVar].u.idxRefVar = idxOtherVar; /* unpacked */
7197
7198 /* Update the other variable, ensure it's a stack variable. */
7199 /** @todo handle variables with const values... that'll go boom now. */
7200 pReNative->Core.aVars[idxOtherVar].idxReferrerVar = idxVar;
7201 iemNativeVarSetKindToStack(pReNative, IEMNATIVE_VAR_IDX_PACK(idxOtherVar));
7202}
7203
7204
7205/**
7206 * Sets the variable to a reference (pointer) to a guest register reference.
7207 *
7208 * This does not require stack storage as we know the value and can always
7209 * reload it. Loading is postponed till needed.
7210 *
7211 * @param pReNative The recompiler state.
7212 * @param idxVar The variable.
7213 * @param   enmRegClass     The class of guest registers to reference.
7214 * @param idxReg The register within @a enmRegClass to reference.
7215 *
7216 * @throws VERR_IEM_VAR_IPE_2
7217 */
7218DECL_HIDDEN_THROW(void) iemNativeVarSetKindToGstRegRef(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar,
7219 IEMNATIVEGSTREGREF enmRegClass, uint8_t idxReg)
7220{
7221 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7222 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7223
7224 if (pVar->enmKind != kIemNativeVarKind_GstRegRef)
7225 {
7226 /* Only simple transitions for now. */
7227 AssertStmt(pVar->enmKind == kIemNativeVarKind_Invalid, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
7228 pVar->enmKind = kIemNativeVarKind_GstRegRef;
7229 }
7230 AssertStmt(pVar->idxReg == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_2));
7231
7232 pVar->u.GstRegRef.enmClass = enmRegClass;
7233 pVar->u.GstRegRef.idx = idxReg;
7234}
7235
7236
7237DECL_HIDDEN_THROW(uint8_t) iemNativeArgAlloc(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t cbType)
7238{
7239 return IEMNATIVE_VAR_IDX_PACK(iemNativeArgAllocInt(pReNative, iArgNo, cbType));
7240}
7241
7242
7243DECL_HIDDEN_THROW(uint8_t) iemNativeArgAllocConst(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t cbType, uint64_t uValue)
7244{
7245 uint8_t const idxVar = IEMNATIVE_VAR_IDX_PACK(iemNativeArgAllocInt(pReNative, iArgNo, cbType));
7246
7247 /* Since we're using a generic uint64_t value type, we must truncate it if
7248       the variable is smaller, otherwise we may end up with a too large value
7249       when scaling up an imm8 w/ sign-extension.
7250
7251 This caused trouble with a "add bx, 0xffff" instruction (around f000:ac60
7252       in the bios, bx=1) when running on arm, because clang expects 16-bit
7253 register parameters to have bits 16 and up set to zero. Instead of
7254 setting x1 = 0xffff we ended up with x1 = 0xffffffffffffff and the wrong
7255 CF value in the result. */
7256 switch (cbType)
7257 {
7258 case sizeof(uint8_t): uValue &= UINT64_C(0xff); break;
7259 case sizeof(uint16_t): uValue &= UINT64_C(0xffff); break;
7260 case sizeof(uint32_t): uValue &= UINT64_C(0xffffffff); break;
7261 }
7262 iemNativeVarSetKindToConst(pReNative, idxVar, uValue);
7263 return idxVar;
7264}
7265
7266
7267DECL_HIDDEN_THROW(uint8_t) iemNativeArgAllocLocalRef(PIEMRECOMPILERSTATE pReNative, uint8_t iArgNo, uint8_t idxOtherVar)
7268{
7269 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxOtherVar);
7270 idxOtherVar = IEMNATIVE_VAR_IDX_UNPACK(idxOtherVar);
7271 AssertStmt( idxOtherVar < RT_ELEMENTS(pReNative->Core.aVars)
7272 && (pReNative->Core.bmVars & RT_BIT_32(idxOtherVar))
7273 && pReNative->Core.aVars[idxOtherVar].uArgNo == UINT8_MAX,
7274 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_1));
7275
7276 uint8_t const idxArgVar = iemNativeArgAlloc(pReNative, iArgNo, sizeof(uintptr_t));
7277 iemNativeVarSetKindToLocalRef(pReNative, IEMNATIVE_VAR_IDX_UNPACK(idxArgVar), idxOtherVar);
7278 return idxArgVar;
7279}
7280
7281
7282DECL_HIDDEN_THROW(uint8_t) iemNativeVarAlloc(PIEMRECOMPILERSTATE pReNative, uint8_t cbType)
7283{
7284 uint8_t const idxVar = IEMNATIVE_VAR_IDX_PACK(iemNativeVarAllocInt(pReNative, cbType));
7285 /* Don't set to stack now, leave that to the first use as for instance
7286 IEM_MC_CALC_RM_EFF_ADDR may produce a const/immediate result (esp. in DOS). */
7287 return idxVar;
7288}
7289
7290
7291DECL_HIDDEN_THROW(uint8_t) iemNativeVarAllocConst(PIEMRECOMPILERSTATE pReNative, uint8_t cbType, uint64_t uValue)
7292{
7293 uint8_t const idxVar = IEMNATIVE_VAR_IDX_PACK(iemNativeVarAllocInt(pReNative, cbType));
7294
7295 /* Since we're using a generic uint64_t value type, we must truncate it if
7296       the variable is smaller, otherwise we may end up with a too large value
7297       when scaling up an imm8 w/ sign-extension. */
7298 switch (cbType)
7299 {
7300 case sizeof(uint8_t): uValue &= UINT64_C(0xff); break;
7301 case sizeof(uint16_t): uValue &= UINT64_C(0xffff); break;
7302 case sizeof(uint32_t): uValue &= UINT64_C(0xffffffff); break;
7303 }
7304 iemNativeVarSetKindToConst(pReNative, idxVar, uValue);
7305 return idxVar;
7306}
7307
7308
7309/**
7310 * Makes sure variable @a idxVar has a register assigned to it and that it stays
7311 * fixed till we call iemNativeVarRegisterRelease.
7312 *
7313 * @returns The host register number.
7314 * @param pReNative The recompiler state.
7315 * @param idxVar The variable.
7316 * @param poff Pointer to the instruction buffer offset.
7317 * In case a register needs to be freed up or the value
7318 * loaded off the stack.
7319 * @param fInitialized Set if the variable must already have been initialized.
7320 * Will throw VERR_IEM_VAR_NOT_INITIALIZED if this is not
7321 * the case.
7322 * @param idxRegPref Preferred register number or UINT8_MAX.
7323 */
7324DECL_HIDDEN_THROW(uint8_t) iemNativeVarRegisterAcquire(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint32_t *poff,
7325 bool fInitialized /*= false*/, uint8_t idxRegPref /*= UINT8_MAX*/)
7326{
7327 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7328 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7329 Assert(pVar->cbVar <= 8);
7330 Assert(!pVar->fRegAcquired);
7331
7332 uint8_t idxReg = pVar->idxReg;
7333 if (idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
7334 {
7335 Assert( pVar->enmKind > kIemNativeVarKind_Invalid
7336 && pVar->enmKind < kIemNativeVarKind_End);
7337 pVar->fRegAcquired = true;
7338 return idxReg;
7339 }
7340
7341 /*
7342 * If the kind of variable has not yet been set, default to 'stack'.
7343 */
7344 Assert( pVar->enmKind >= kIemNativeVarKind_Invalid
7345 && pVar->enmKind < kIemNativeVarKind_End);
7346 if (pVar->enmKind == kIemNativeVarKind_Invalid)
7347 iemNativeVarSetKindToStack(pReNative, idxVar);
7348
7349 /*
7350     * We have to allocate a register for the variable, even if it's a stack
7351     * one, as we don't know if there are modifications being made to it
7352     * before it's finalized (todo: analyze and insert hints about that?).
7353     *
7354     * If we can, we try to get the correct register for argument variables.
7355     * This assumes that most argument variables are fetched as close as
7356     * possible to the actual call, so that there aren't any interfering
7357     * hidden calls (memory accesses, etc.) in between.
7358     *
7359     * If we cannot, or it's a regular (non-argument) variable, we make sure
7360     * no argument registers that will be used by this MC block are allocated
7361     * here, and we always prefer non-volatile registers to avoid needing to
7362     * spill stuff for internal calls.
7363 */
7364 /** @todo Detect too early argument value fetches and warn about hidden
7365 * calls causing less optimal code to be generated in the python script. */
7366
7367 uint8_t const uArgNo = pVar->uArgNo;
7368 if ( uArgNo < RT_ELEMENTS(g_aidxIemNativeCallRegs)
7369 && !(pReNative->Core.bmHstRegs & RT_BIT_32(g_aidxIemNativeCallRegs[uArgNo])))
7370 {
7371 idxReg = g_aidxIemNativeCallRegs[uArgNo];
7372 iemNativeRegClearGstRegShadowing(pReNative, idxReg, *poff);
7373 Log11(("iemNativeVarRegisterAcquire: idxVar=%#x idxReg=%u (matching arg %u)\n", idxVar, idxReg, uArgNo));
7374 }
7375 else if ( idxRegPref >= RT_ELEMENTS(pReNative->Core.aHstRegs)
7376 || (pReNative->Core.bmHstRegs & RT_BIT_32(idxRegPref)))
7377 {
7378 uint32_t const fNotArgsMask = ~g_afIemNativeCallRegs[RT_MIN(pReNative->cArgs, IEMNATIVE_CALL_ARG_GREG_COUNT)];
7379 uint32_t const fRegs = ~pReNative->Core.bmHstRegs
7380 & ~pReNative->Core.bmHstRegsWithGstShadow
7381 & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK)
7382 & fNotArgsMask;
7383 if (fRegs)
7384 {
7385 /* Pick from the top as that both arm64 and amd64 have a block of non-volatile registers there. */
7386 idxReg = (uint8_t)ASMBitLastSetU32( fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK
7387 ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1;
7388 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
7389 Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
7390 Log11(("iemNativeVarRegisterAcquire: idxVar=%#x idxReg=%u (uArgNo=%u)\n", idxVar, idxReg, uArgNo));
7391 }
7392 else
7393 {
7394 idxReg = iemNativeRegAllocFindFree(pReNative, poff, false /*fPreferVolatile*/,
7395 IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK & fNotArgsMask);
7396 AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_VAR));
7397 Log11(("iemNativeVarRegisterAcquire: idxVar=%#x idxReg=%u (slow, uArgNo=%u)\n", idxVar, idxReg, uArgNo));
7398 }
7399 }
7400 else
7401 {
7402 idxReg = idxRegPref;
7403 iemNativeRegClearGstRegShadowing(pReNative, idxReg, *poff);
7404 Log11(("iemNativeVarRegisterAcquire: idxVar=%#x idxReg=%u (preferred)\n", idxVar, idxReg));
7405 }
7406 iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Var, idxVar);
7407 pVar->idxReg = idxReg;
7408
7409#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
7410 pVar->fSimdReg = false;
7411#endif
7412
7413 /*
7414 * Load it off the stack if we've got a stack slot.
7415 */
7416 uint8_t const idxStackSlot = pVar->idxStackSlot;
7417 if (idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS)
7418 {
7419 Assert(fInitialized);
7420 int32_t const offDispBp = iemNativeStackCalcBpDisp(idxStackSlot);
7421 switch (pVar->cbVar)
7422 {
7423 case 1: *poff = iemNativeEmitLoadGprByBpU8( pReNative, *poff, idxReg, offDispBp); break;
7424 case 2: *poff = iemNativeEmitLoadGprByBpU16(pReNative, *poff, idxReg, offDispBp); break;
7425 case 3: AssertFailed(); RT_FALL_THRU();
7426 case 4: *poff = iemNativeEmitLoadGprByBpU32(pReNative, *poff, idxReg, offDispBp); break;
7427 default: AssertFailed(); RT_FALL_THRU();
7428 case 8: *poff = iemNativeEmitLoadGprByBp( pReNative, *poff, idxReg, offDispBp); break;
7429 }
7430 }
7431 else
7432 {
7433 Assert(idxStackSlot == UINT8_MAX);
7434 AssertStmt(!fInitialized, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
7435 }
7436 pVar->fRegAcquired = true;
7437 return idxReg;
7438}
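
/*
 * Informal summary of the register selection order implemented above (a
 * reading aid, not a specification):
 *      1. Reuse the register already assigned to the variable, if any.
 *      2. For an argument variable, take the matching call argument register
 *         (g_aidxIemNativeCallRegs[uArgNo]) when it is still unallocated.
 *      3. Use the caller supplied idxRegPref when it is valid and free.
 *      4. Otherwise pick a free register that is neither fixed, shadowing a
 *         guest register, nor one of this block's call argument registers,
 *         preferring non-volatile ones.
 *      5. As a last resort, let iemNativeRegAllocFindFree() make room.
 */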
7439
7440
7441#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
7442/**
7443 * Makes sure variable @a idxVar has a SIMD register assigned to it and that it stays
7444 * fixed till we call iemNativeVarRegisterRelease.
7445 *
7446 * @returns The host register number.
7447 * @param pReNative The recompiler state.
7448 * @param idxVar The variable.
7449 * @param poff Pointer to the instruction buffer offset.
7450 * In case a register needs to be freed up or the value
7451 * loaded off the stack.
7452 * @param fInitialized Set if the variable must already have been initialized.
7453 * Will throw VERR_IEM_VAR_NOT_INITIALIZED if this is not
7454 * the case.
7455 * @param idxRegPref Preferred SIMD register number or UINT8_MAX.
7456 */
7457DECL_HIDDEN_THROW(uint8_t) iemNativeVarSimdRegisterAcquire(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint32_t *poff,
7458 bool fInitialized /*= false*/, uint8_t idxRegPref /*= UINT8_MAX*/)
7459{
7460 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7461 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7462 Assert( pVar->cbVar == sizeof(RTUINT128U)
7463 || pVar->cbVar == sizeof(RTUINT256U));
7464 Assert(!pVar->fRegAcquired);
7465
7466 uint8_t idxReg = pVar->idxReg;
7467 if (idxReg < RT_ELEMENTS(pReNative->Core.aHstSimdRegs))
7468 {
7469 Assert( pVar->enmKind > kIemNativeVarKind_Invalid
7470 && pVar->enmKind < kIemNativeVarKind_End);
7471 pVar->fRegAcquired = true;
7472 return idxReg;
7473 }
7474
7475 /*
7476 * If the kind of variable has not yet been set, default to 'stack'.
7477 */
7478 Assert( pVar->enmKind >= kIemNativeVarKind_Invalid
7479 && pVar->enmKind < kIemNativeVarKind_End);
7480 if (pVar->enmKind == kIemNativeVarKind_Invalid)
7481 iemNativeVarSetKindToStack(pReNative, idxVar);
7482
7483 /*
7484     * We have to allocate a register for the variable, even if it's a stack
7485     * one, as we don't know if there are modifications being made to it
7486     * before it's finalized (todo: analyze and insert hints about that?).
7487     *
7488     * If we can, we try to get the correct register for argument variables.
7489     * This assumes that most argument variables are fetched as close as
7490     * possible to the actual call, so that there aren't any interfering
7491     * hidden calls (memory accesses, etc.) in between.
7492     *
7493     * If we cannot, or it's a regular (non-argument) variable, we make sure
7494     * no argument registers that will be used by this MC block are allocated
7495     * here, and we always prefer non-volatile registers to avoid needing to
7496     * spill stuff for internal calls.
7497 */
7498 /** @todo Detect too early argument value fetches and warn about hidden
7499 * calls causing less optimal code to be generated in the python script. */
7500
7501 uint8_t const uArgNo = pVar->uArgNo;
7502 Assert(uArgNo == UINT8_MAX); RT_NOREF(uArgNo); /* No SIMD registers as arguments for now. */
7503
7504    /* SIMD is a bit simpler for now because there is no support for arguments. */
7505 if ( idxRegPref >= RT_ELEMENTS(pReNative->Core.aHstSimdRegs)
7506 || (pReNative->Core.bmHstSimdRegs & RT_BIT_32(idxRegPref)))
7507 {
7508 uint32_t const fNotArgsMask = UINT32_MAX; //~g_afIemNativeCallRegs[RT_MIN(pReNative->cArgs, IEMNATIVE_CALL_ARG_GREG_COUNT)];
7509 uint32_t const fRegs = ~pReNative->Core.bmHstSimdRegs
7510 & ~pReNative->Core.bmHstSimdRegsWithGstShadow
7511 & (~IEMNATIVE_SIMD_REG_FIXED_MASK & IEMNATIVE_HST_SIMD_REG_MASK)
7512 & fNotArgsMask;
7513 if (fRegs)
7514 {
7515 idxReg = (uint8_t)ASMBitLastSetU32( fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK
7516 ? fRegs & ~IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK : fRegs) - 1;
7517 Assert(pReNative->Core.aHstSimdRegs[idxReg].fGstRegShadows == 0);
7518 Assert(!(pReNative->Core.bmHstSimdRegsWithGstShadow & RT_BIT_32(idxReg)));
7519 Log11(("iemNativeVarSimdRegisterAcquire: idxVar=%#x idxReg=%u (uArgNo=%u)\n", idxVar, idxReg, uArgNo));
7520 }
7521 else
7522 {
7523 idxReg = iemNativeSimdRegAllocFindFree(pReNative, poff, false /*fPreferVolatile*/,
7524 IEMNATIVE_HST_SIMD_REG_MASK & ~IEMNATIVE_SIMD_REG_FIXED_MASK & fNotArgsMask);
7525 AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_VAR));
7526 Log11(("iemNativeVarSimdRegisterAcquire: idxVar=%#x idxReg=%u (slow, uArgNo=%u)\n", idxVar, idxReg, uArgNo));
7527 }
7528 }
7529 else
7530 {
7531 idxReg = idxRegPref;
7532 AssertReleaseFailed(); //iemNativeRegClearGstRegShadowing(pReNative, idxReg, *poff);
7533 Log11(("iemNativeVarSimdRegisterAcquire: idxVar=%#x idxReg=%u (preferred)\n", idxVar, idxReg));
7534 }
7535 iemNativeSimdRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Var, idxVar);
7536
7537 pVar->fSimdReg = true;
7538 pVar->idxReg = idxReg;
7539
7540 /*
7541 * Load it off the stack if we've got a stack slot.
7542 */
7543 uint8_t const idxStackSlot = pVar->idxStackSlot;
7544 if (idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS)
7545 {
7546 Assert(fInitialized);
7547 int32_t const offDispBp = iemNativeStackCalcBpDisp(idxStackSlot);
7548 switch (pVar->cbVar)
7549 {
7550 case sizeof(RTUINT128U): *poff = iemNativeEmitLoadVecRegByBpU128(pReNative, *poff, idxReg, offDispBp); break;
7551 default: AssertFailed(); RT_FALL_THRU();
7552 case sizeof(RTUINT256U): *poff = iemNativeEmitLoadVecRegByBpU256(pReNative, *poff, idxReg, offDispBp); break;
7553 }
7554 }
7555 else
7556 {
7557 Assert(idxStackSlot == UINT8_MAX);
7558 AssertStmt(!fInitialized, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
7559 }
7560 pVar->fRegAcquired = true;
7561 return idxReg;
7562}
7563#endif
7564
7565
7566/**
7567 * The value of variable @a idxVar will be written in full to the @a enmGstReg
7568 * guest register.
7569 *
7570 * This function makes sure there is a register for it and sets it to be the
7571 * current shadow copy of @a enmGstReg.
7572 *
7573 * @returns The host register number.
7574 * @param pReNative The recompiler state.
7575 * @param idxVar The variable.
7576 * @param enmGstReg The guest register this variable will be written to
7577 * after this call.
7578 * @param poff Pointer to the instruction buffer offset.
7579 * In case a register needs to be freed up or if the
7580 * variable content needs to be loaded off the stack.
7581 *
7582 * @note We DO NOT expect @a idxVar to be an argument variable,
7583 *          because this function is only used in the commit stage of an
7584 *          instruction.
7585 */
7586DECL_HIDDEN_THROW(uint8_t)
7587iemNativeVarRegisterAcquireForGuestReg(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, IEMNATIVEGSTREG enmGstReg, uint32_t *poff)
7588{
7589 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7590 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7591 Assert(!pVar->fRegAcquired);
7592 AssertMsgStmt( pVar->cbVar <= 8
7593 && ( pVar->enmKind == kIemNativeVarKind_Immediate
7594 || pVar->enmKind == kIemNativeVarKind_Stack),
7595 ("idxVar=%#x cbVar=%d enmKind=%d enmGstReg=%s\n", idxVar, pVar->cbVar,
7596 pVar->enmKind, g_aGstShadowInfo[enmGstReg].pszName),
7597 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_6));
7598
7599 /*
7600 * This shouldn't ever be used for arguments, unless it's in a weird else
7601 * branch that doesn't do any calling and even then it's questionable.
7602 *
7603 * However, in case someone writes crazy wrong MC code and does register
7604 * updates before making calls, just use the regular register allocator to
7605 * ensure we get a register suitable for the intended argument number.
7606 */
7607 AssertStmt(pVar->uArgNo == UINT8_MAX, iemNativeVarRegisterAcquire(pReNative, idxVar, poff));
7608
7609 /*
7610 * If there is already a register for the variable, we transfer/set the
7611 * guest shadow copy assignment to it.
7612 */
7613 uint8_t idxReg = pVar->idxReg;
7614 if (idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
7615 {
7616 if (pReNative->Core.bmGstRegShadows & RT_BIT_64(enmGstReg))
7617 {
7618 uint8_t const idxRegOld = pReNative->Core.aidxGstRegShadows[enmGstReg];
7619 iemNativeRegTransferGstRegShadowing(pReNative, idxRegOld, idxReg, enmGstReg, *poff);
7620 Log12(("iemNativeVarRegisterAcquireForGuestReg: Moved %s for guest %s into %s for full write\n",
7621 g_apszIemNativeHstRegNames[idxRegOld], g_aGstShadowInfo[enmGstReg].pszName, g_apszIemNativeHstRegNames[idxReg]));
7622 }
7623 else
7624 {
7625 iemNativeRegMarkAsGstRegShadow(pReNative, idxReg, enmGstReg, *poff);
7626 Log12(("iemNativeVarRegisterAcquireForGuestReg: Marking %s as copy of guest %s (full write)\n",
7627 g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName));
7628 }
7629 /** @todo figure this one out. We need some way of making sure the register isn't
7630 * modified after this point, just in case we start writing crappy MC code. */
7631 pVar->enmGstReg = enmGstReg;
7632 pVar->fRegAcquired = true;
7633 return idxReg;
7634 }
7635 Assert(pVar->uArgNo == UINT8_MAX);
7636
7637 /*
7638     * Because this is supposed to be the commit stage, we just tag along with the
7639 * temporary register allocator and upgrade it to a variable register.
7640 */
7641 idxReg = iemNativeRegAllocTmpForGuestReg(pReNative, poff, enmGstReg, kIemNativeGstRegUse_ForFullWrite);
7642 Assert(pReNative->Core.aHstRegs[idxReg].enmWhat == kIemNativeWhat_Tmp);
7643 Assert(pReNative->Core.aHstRegs[idxReg].idxVar == UINT8_MAX);
7644 pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Var;
7645 pReNative->Core.aHstRegs[idxReg].idxVar = idxVar;
7646 pVar->idxReg = idxReg;
7647
7648 /*
7649 * Now we need to load the register value.
7650 */
7651 if (pVar->enmKind == kIemNativeVarKind_Immediate)
7652 *poff = iemNativeEmitLoadGprImm64(pReNative, *poff, idxReg, pVar->u.uValue);
7653 else
7654 {
7655 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
7656 int32_t const offDispBp = iemNativeStackCalcBpDisp(idxStackSlot);
7657 switch (pVar->cbVar)
7658 {
7659 case sizeof(uint64_t):
7660 *poff = iemNativeEmitLoadGprByBp(pReNative, *poff, idxReg, offDispBp);
7661 break;
7662 case sizeof(uint32_t):
7663 *poff = iemNativeEmitLoadGprByBpU32(pReNative, *poff, idxReg, offDispBp);
7664 break;
7665 case sizeof(uint16_t):
7666 *poff = iemNativeEmitLoadGprByBpU16(pReNative, *poff, idxReg, offDispBp);
7667 break;
7668 case sizeof(uint8_t):
7669 *poff = iemNativeEmitLoadGprByBpU8(pReNative, *poff, idxReg, offDispBp);
7670 break;
7671 default:
7672 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_6));
7673 }
7674 }
7675
7676 pVar->fRegAcquired = true;
7677 return idxReg;
7678}
7679
7680
7681/**
7682 * Emit code to save volatile registers prior to a call to a helper (TLB miss).
7683 *
7684 * This is used together with iemNativeVarRestoreVolatileRegsPostHlpCall() and
7685 * optionally iemNativeRegRestoreGuestShadowsInVolatileRegs() to bypass the
7686 * requirement of flushing anything in volatile host registers when making a
7687 * call.
7688 *
7689 * @returns New @a off value.
7690 * @param pReNative The recompiler state.
7691 * @param off The code buffer position.
7692 * @param fHstRegsNotToSave Set of registers not to save & restore.
7693 */
7694DECL_HIDDEN_THROW(uint32_t)
7695iemNativeVarSaveVolatileRegsPreHlpCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fHstRegsNotToSave)
7696{
7697 uint32_t fHstRegs = pReNative->Core.bmHstRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK & ~fHstRegsNotToSave;
7698 if (fHstRegs)
7699 {
7700 do
7701 {
7702 unsigned int const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
7703 fHstRegs &= ~RT_BIT_32(idxHstReg);
7704
7705 if (pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Var)
7706 {
7707 uint8_t const idxVar = pReNative->Core.aHstRegs[idxHstReg].idxVar;
7708 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7709 AssertStmt( IEMNATIVE_VAR_IDX_UNPACK(idxVar) < RT_ELEMENTS(pReNative->Core.aVars)
7710 && (pReNative->Core.bmVars & RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)))
7711 && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg,
7712 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_12));
7713 switch (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].enmKind)
7714 {
7715 case kIemNativeVarKind_Stack:
7716 {
7717 /* Temporarily spill the variable register. */
7718 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
7719 Log12(("iemNativeVarSaveVolatileRegsPreHlpCall: spilling idxVar=%#x/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
7720 idxVar, idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
7721 off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxHstReg);
7722 continue;
7723 }
7724
7725 case kIemNativeVarKind_Immediate:
7726 case kIemNativeVarKind_VarRef:
7727 case kIemNativeVarKind_GstRegRef:
7728 /* It is weird to have any of these loaded at this point. */
7729 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_13));
7730 continue;
7731
7732 case kIemNativeVarKind_End:
7733 case kIemNativeVarKind_Invalid:
7734 break;
7735 }
7736 AssertFailed();
7737 }
7738 else
7739 {
7740 /*
7741 * Allocate a temporary stack slot and spill the register to it.
7742 */
7743 unsigned const idxStackSlot = ASMBitLastSetU32(~pReNative->Core.bmStack) - 1;
7744 AssertStmt(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS,
7745 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_OUT_OF_STACK_SLOTS));
7746 pReNative->Core.bmStack |= RT_BIT_32(idxStackSlot);
7747 pReNative->Core.aHstRegs[idxHstReg].idxStackSlot = (uint8_t)idxStackSlot;
7748 Log12(("iemNativeVarSaveVolatileRegsPreHlpCall: spilling idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
7749 idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
7750 off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxHstReg);
7751 }
7752 } while (fHstRegs);
7753 }
7754#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
7755 fHstRegs = pReNative->Core.bmHstSimdRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK;
7756 if (fHstRegs)
7757 {
7758 do
7759 {
7760 unsigned int const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
7761 fHstRegs &= ~RT_BIT_32(idxHstReg);
7762
7763 /*
7764             * Guest registers are flushed to CPUMCTX at the moment and don't need a stack slot allocated,
7765             * which would be more difficult due to them spanning multiple stack slots and having different
7766             * sizes (besides, we only have a limited amount of slots at the moment).  Fixed temporary
7767             * registers don't need saving.
7768 */
7769 if ( pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_FixedTmp
7770 || pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_FixedReserved)
7771 continue;
7772
7773 Assert(pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_Var);
7774
7775 uint8_t const idxVar = pReNative->Core.aHstSimdRegs[idxHstReg].idxVar;
7776 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7777 AssertStmt( IEMNATIVE_VAR_IDX_UNPACK(idxVar) < RT_ELEMENTS(pReNative->Core.aVars)
7778 && (pReNative->Core.bmVars & RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)))
7779 && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg
7780 && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg
7781 && ( pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar == sizeof(RTUINT128U)
7782 || pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar == sizeof(RTUINT256U)),
7783 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_12));
7784 switch (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].enmKind)
7785 {
7786 case kIemNativeVarKind_Stack:
7787 {
7788 /* Temporarily spill the variable register. */
7789 uint8_t const cbVar = pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar;
7790 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
7791 Log12(("iemNativeVarSaveVolatileRegsPreHlpCall: spilling idxVar=%#x/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
7792 idxVar, idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
7793 if (cbVar == sizeof(RTUINT128U))
7794 off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxHstReg);
7795 else
7796 off = iemNativeEmitStoreVecRegByBpU256(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxHstReg);
7797 continue;
7798 }
7799
7800 case kIemNativeVarKind_Immediate:
7801 case kIemNativeVarKind_VarRef:
7802 case kIemNativeVarKind_GstRegRef:
7803 /* It is weird to have any of these loaded at this point. */
7804 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_13));
7805 continue;
7806
7807 case kIemNativeVarKind_End:
7808 case kIemNativeVarKind_Invalid:
7809 break;
7810 }
7811 AssertFailed();
7812 } while (fHstRegs);
7813 }
7814#endif
7815 return off;
7816}
7817
7818
7819/**
7820 * Emit code to restore volatile registers after a call to a helper.
7821 *
7822 * @returns New @a off value.
7823 * @param pReNative The recompiler state.
7824 * @param off The code buffer position.
7825 * @param fHstRegsNotToSave Set of registers not to save & restore.
7826 * @see iemNativeVarSaveVolatileRegsPreHlpCall(),
7827 * iemNativeRegRestoreGuestShadowsInVolatileRegs()
7828 */
7829DECL_HIDDEN_THROW(uint32_t)
7830iemNativeVarRestoreVolatileRegsPostHlpCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fHstRegsNotToSave)
7831{
7832 uint32_t fHstRegs = pReNative->Core.bmHstRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK & ~fHstRegsNotToSave;
7833 if (fHstRegs)
7834 {
7835 do
7836 {
7837 unsigned int const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
7838 fHstRegs &= ~RT_BIT_32(idxHstReg);
7839
7840 if (pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Var)
7841 {
7842 uint8_t const idxVar = pReNative->Core.aHstRegs[idxHstReg].idxVar;
7843 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7844 AssertStmt( IEMNATIVE_VAR_IDX_UNPACK(idxVar) < RT_ELEMENTS(pReNative->Core.aVars)
7845 && (pReNative->Core.bmVars & RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)))
7846 && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg,
7847 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_12));
7848 switch (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].enmKind)
7849 {
7850 case kIemNativeVarKind_Stack:
7851 {
7852 /* Unspill the variable register. */
7853 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
7854 Log12(("iemNativeVarRestoreVolatileRegsPostHlpCall: unspilling idxVar=%#x/idxReg=%d (slot %#x bp+%d, off=%#x)\n",
7855 idxVar, idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
7856 off = iemNativeEmitLoadGprByBp(pReNative, off, idxHstReg, iemNativeStackCalcBpDisp(idxStackSlot));
7857 continue;
7858 }
7859
7860 case kIemNativeVarKind_Immediate:
7861 case kIemNativeVarKind_VarRef:
7862 case kIemNativeVarKind_GstRegRef:
7863 /* It is weird to have any of these loaded at this point. */
7864 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_13));
7865 continue;
7866
7867 case kIemNativeVarKind_End:
7868 case kIemNativeVarKind_Invalid:
7869 break;
7870 }
7871 AssertFailed();
7872 }
7873 else
7874 {
7875 /*
7876 * Restore from temporary stack slot.
7877 */
7878 uint8_t const idxStackSlot = pReNative->Core.aHstRegs[idxHstReg].idxStackSlot;
7879 AssertContinue(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS && (pReNative->Core.bmStack & RT_BIT_32(idxStackSlot)));
7880 pReNative->Core.bmStack &= ~RT_BIT_32(idxStackSlot);
7881 pReNative->Core.aHstRegs[idxHstReg].idxStackSlot = UINT8_MAX;
7882
7883 off = iemNativeEmitLoadGprByBp(pReNative, off, idxHstReg, iemNativeStackCalcBpDisp(idxStackSlot));
7884 }
7885 } while (fHstRegs);
7886 }
7887#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
7888 fHstRegs = pReNative->Core.bmHstSimdRegs & IEMNATIVE_CALL_VOLATILE_SIMD_REG_MASK;
7889 if (fHstRegs)
7890 {
7891 do
7892 {
7893 unsigned int const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
7894 fHstRegs &= ~RT_BIT_32(idxHstReg);
7895
7896 if ( pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_FixedTmp
7897 || pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_FixedReserved)
7898 continue;
7899 Assert(pReNative->Core.aHstSimdRegs[idxHstReg].enmWhat == kIemNativeWhat_Var);
7900
7901 uint8_t const idxVar = pReNative->Core.aHstSimdRegs[idxHstReg].idxVar;
7902 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7903 AssertStmt( IEMNATIVE_VAR_IDX_UNPACK(idxVar) < RT_ELEMENTS(pReNative->Core.aVars)
7904 && (pReNative->Core.bmVars & RT_BIT_32(IEMNATIVE_VAR_IDX_UNPACK(idxVar)))
7905 && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].idxReg == idxHstReg
7906 && pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].fSimdReg
7907 && ( pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar == sizeof(RTUINT128U)
7908 || pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar == sizeof(RTUINT256U)),
7909 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_12));
7910 switch (pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].enmKind)
7911 {
7912 case kIemNativeVarKind_Stack:
7913 {
7914 /* Unspill the variable register. */
7915 uint8_t const cbVar = pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)].cbVar;
7916 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
7917 Log12(("iemNativeVarRestoreVolatileRegsPostHlpCall: unspilling idxVar=%#x/idxReg=%d (slot %#x bp+%d, off=%#x)\n",
7918 idxVar, idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
7919
7920 if (cbVar == sizeof(RTUINT128U))
7921 off = iemNativeEmitLoadVecRegByBpU128(pReNative, off, idxHstReg, iemNativeStackCalcBpDisp(idxStackSlot));
7922 else
7923 off = iemNativeEmitLoadVecRegByBpU256(pReNative, off, idxHstReg, iemNativeStackCalcBpDisp(idxStackSlot));
7924 continue;
7925 }
7926
7927 case kIemNativeVarKind_Immediate:
7928 case kIemNativeVarKind_VarRef:
7929 case kIemNativeVarKind_GstRegRef:
7930 /* It is weird to have any of these loaded at this point. */
7931 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_13));
7932 continue;
7933
7934 case kIemNativeVarKind_End:
7935 case kIemNativeVarKind_Invalid:
7936 break;
7937 }
7938 AssertFailed();
7939 } while (fHstRegs);
7940 }
7941#endif
7942 return off;
7943}
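
/*
 * Informal usage sketch: the save/restore pair above is meant to bracket a
 * helper call (e.g. a TLB miss path), roughly like this (illustrative only,
 * the exact call emission differs per call site):
 *
 *      off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
 *      ... load IEMNATIVE_CALL_ARGn_GREG and emit the helper call ...
 *      off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
 *      ... optionally followed by iemNativeRegRestoreGuestShadowsInVolatileRegs() ...
 */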
7944
7945
7946/**
7947 * Worker that frees the stack slots for variable @a idxVar if any allocated.
7948 *
7949 * This is used both by iemNativeVarFreeOneWorker and iemNativeEmitCallCommon.
7950 *
7951 * ASSUMES that @a idxVar is valid and unpacked.
7952 */
7953DECL_FORCE_INLINE(void) iemNativeVarFreeStackSlots(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
7954{
7955 Assert(idxVar < RT_ELEMENTS(pReNative->Core.aVars)); /* unpacked! */
7956 uint8_t const idxStackSlot = pReNative->Core.aVars[idxVar].idxStackSlot;
7957 if (idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS)
7958 {
7959 uint8_t const cbVar = pReNative->Core.aVars[idxVar].cbVar;
7960 uint8_t const cSlots = (cbVar + sizeof(uint64_t) - 1) / sizeof(uint64_t);
7961 uint32_t const fAllocMask = (uint32_t)(RT_BIT_32(cSlots) - 1U);
7962 Assert(cSlots > 0);
7963 Assert(((pReNative->Core.bmStack >> idxStackSlot) & fAllocMask) == fAllocMask);
7964 Log11(("iemNativeVarFreeStackSlots: idxVar=%d/%#x iSlot=%#x/%#x (cbVar=%#x)\n",
7965 idxVar, IEMNATIVE_VAR_IDX_PACK(idxVar), idxStackSlot, fAllocMask, cbVar));
7966 pReNative->Core.bmStack &= ~(fAllocMask << idxStackSlot);
7967 pReNative->Core.aVars[idxVar].idxStackSlot = UINT8_MAX;
7968 }
7969 else
7970 Assert(idxStackSlot == UINT8_MAX);
7971}
7972
7973
7974/**
7975 * Worker that frees a single variable.
7976 *
7977 * ASSUMES that @a idxVar is valid and unpacked.
7978 */
7979DECLHIDDEN(void) iemNativeVarFreeOneWorker(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
7980{
7981 Assert( pReNative->Core.aVars[idxVar].enmKind >= kIemNativeVarKind_Invalid /* Including invalid as we may have unused */
7982 && pReNative->Core.aVars[idxVar].enmKind < kIemNativeVarKind_End); /* variables in conditional branches. */
7983 Assert(!pReNative->Core.aVars[idxVar].fRegAcquired);
7984
7985 /* Free the host register first if any assigned. */
7986 uint8_t const idxHstReg = pReNative->Core.aVars[idxVar].idxReg;
7987#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
7988 if ( idxHstReg != UINT8_MAX
7989 && pReNative->Core.aVars[idxVar].fSimdReg)
7990 {
7991 Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
7992 Assert(pReNative->Core.aHstSimdRegs[idxHstReg].idxVar == IEMNATIVE_VAR_IDX_PACK(idxVar));
7993 pReNative->Core.aHstSimdRegs[idxHstReg].idxVar = UINT8_MAX;
7994 pReNative->Core.bmHstSimdRegs &= ~RT_BIT_32(idxHstReg);
7995 }
7996 else
7997#endif
7998 if (idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
7999 {
8000 Assert(pReNative->Core.aHstRegs[idxHstReg].idxVar == IEMNATIVE_VAR_IDX_PACK(idxVar));
8001 pReNative->Core.aHstRegs[idxHstReg].idxVar = UINT8_MAX;
8002 pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
8003 }
8004
8005 /* Free argument mapping. */
8006 uint8_t const uArgNo = pReNative->Core.aVars[idxVar].uArgNo;
8007 if (uArgNo < RT_ELEMENTS(pReNative->Core.aidxArgVars))
8008 pReNative->Core.aidxArgVars[uArgNo] = UINT8_MAX;
8009
8010 /* Free the stack slots. */
8011 iemNativeVarFreeStackSlots(pReNative, idxVar);
8012
8013 /* Free the actual variable. */
8014 pReNative->Core.aVars[idxVar].enmKind = kIemNativeVarKind_Invalid;
8015 pReNative->Core.bmVars &= ~RT_BIT_32(idxVar);
8016}
8017
8018
8019/**
8020 * Worker for iemNativeVarFreeAll that's called when there is anything to do.
8021 */
8022DECLHIDDEN(void) iemNativeVarFreeAllSlow(PIEMRECOMPILERSTATE pReNative, uint32_t bmVars)
8023{
8024 while (bmVars != 0)
8025 {
8026 uint8_t const idxVar = ASMBitFirstSetU32(bmVars) - 1;
8027 bmVars &= ~RT_BIT_32(idxVar);
8028
8029#if 1 /** @todo optimize by simplifying this later... */
8030 iemNativeVarFreeOneWorker(pReNative, idxVar);
8031#else
8032 /* Only need to free the host register, the rest is done as bulk updates below. */
8033 uint8_t const idxHstReg = pReNative->Core.aVars[idxVar].idxReg;
8034 if (idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
8035 {
8036 Assert(pReNative->Core.aHstRegs[idxHstReg].idxVar == IEMNATIVE_VAR_IDX_PACK(idxVar));
8037 pReNative->Core.aHstRegs[idxHstReg].idxVar = UINT8_MAX;
8038 pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
8039 }
8040#endif
8041 }
8042#if 0 /** @todo optimize by simplifying this later... */
8043 pReNative->Core.bmVars = 0;
8044 pReNative->Core.bmStack = 0;
8045 pReNative->Core.u64ArgVars = UINT64_MAX;
8046#endif
8047}
8048
8049
8050
8051/*********************************************************************************************************************************
8052* Emitters for IEM_MC_CALL_CIMPL_XXX *
8053*********************************************************************************************************************************/
8054
8055/**
8056 * Emits code to load a reference to the given guest register into @a idxGprDst.
8057 */
8058DECL_HIDDEN_THROW(uint32_t)
8059iemNativeEmitLeaGprByGstRegRef(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxGprDst,
8060 IEMNATIVEGSTREGREF enmClass, uint8_t idxRegInClass)
8061{
8062#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
8063    /** @todo If we're ever going to allow referencing the RIP register, we need to update the guest value here. */
8064#endif
8065
8066 /*
8067 * Get the offset relative to the CPUMCTX structure.
8068 */
8069 uint32_t offCpumCtx;
8070 switch (enmClass)
8071 {
8072 case kIemNativeGstRegRef_Gpr:
8073 Assert(idxRegInClass < 16);
8074 offCpumCtx = RT_UOFFSETOF_DYN(CPUMCTX, aGRegs[idxRegInClass]);
8075 break;
8076
8077        case kIemNativeGstRegRef_GprHighByte:    /**< AH, CH, DH, BH */
8078 Assert(idxRegInClass < 4);
8079 offCpumCtx = RT_UOFFSETOF_DYN(CPUMCTX, aGRegs[0].bHi) + idxRegInClass * sizeof(CPUMCTXGREG);
8080 break;
8081
8082 case kIemNativeGstRegRef_EFlags:
8083 Assert(idxRegInClass == 0);
8084 offCpumCtx = RT_UOFFSETOF(CPUMCTX, eflags);
8085 break;
8086
8087 case kIemNativeGstRegRef_MxCsr:
8088 Assert(idxRegInClass == 0);
8089 offCpumCtx = RT_UOFFSETOF(CPUMCTX, XState.x87.MXCSR);
8090 break;
8091
8092 case kIemNativeGstRegRef_FpuReg:
8093 Assert(idxRegInClass < 8);
8094 AssertFailed(); /** @todo what kind of indexing? */
8095 offCpumCtx = RT_UOFFSETOF_DYN(CPUMCTX, XState.x87.aRegs[idxRegInClass]);
8096 break;
8097
8098 case kIemNativeGstRegRef_MReg:
8099 Assert(idxRegInClass < 8);
8100 AssertFailed(); /** @todo what kind of indexing? */
8101 offCpumCtx = RT_UOFFSETOF_DYN(CPUMCTX, XState.x87.aRegs[idxRegInClass]);
8102 break;
8103
8104 case kIemNativeGstRegRef_XReg:
8105 Assert(idxRegInClass < 16);
8106 offCpumCtx = RT_UOFFSETOF_DYN(CPUMCTX, XState.x87.aXMM[idxRegInClass]);
8107 break;
8108
8109 case kIemNativeGstRegRef_X87: /* Not a register actually but we would just duplicate code otherwise. */
8110 Assert(idxRegInClass == 0);
8111 offCpumCtx = RT_UOFFSETOF(CPUMCTX, XState.x87);
8112 break;
8113
8114 case kIemNativeGstRegRef_XState: /* Not a register actually but we would just duplicate code otherwise. */
8115 Assert(idxRegInClass == 0);
8116 offCpumCtx = RT_UOFFSETOF(CPUMCTX, XState);
8117 break;
8118
8119 default:
8120 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_5));
8121 }
8122
8123 /*
8124 * Load the value into the destination register.
8125 */
8126#ifdef RT_ARCH_AMD64
8127 off = iemNativeEmitLeaGprByVCpu(pReNative, off, idxGprDst, offCpumCtx + RT_UOFFSETOF(VMCPUCC, cpum.GstCtx));
8128
8129#elif defined(RT_ARCH_ARM64)
8130 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
8131 Assert(offCpumCtx < 4096);
8132 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, idxGprDst, IEMNATIVE_REG_FIXED_PCPUMCTX, offCpumCtx);
8133
8134#else
8135# error "Port me!"
8136#endif
8137
8138 return off;
8139}
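
/*
 * Informal example of what the emitter above produces: for
 * enmClass == kIemNativeGstRegRef_Gpr and idxRegInClass == 3 the destination
 * register ends up holding &pVCpu->cpum.GstCtx.aGRegs[3], i.e. a host pointer
 * to the guest RBX image in CPUMCTX (computed via the VMCPU pointer on AMD64
 * and relative to IEMNATIVE_REG_FIXED_PCPUMCTX on arm64).
 */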
8140
8141
8142/**
8143 * Common code for CIMPL and AIMPL calls.
8144 *
8145 * These are calls that uses argument variables and such. They should not be
8146 * These are calls that use argument variables and such. They should not be
8147 * like a TLB load and similar.
8148 *
8149 * Upon return all that is left to do is to load any hidden arguments and
8150 * perform the call. All argument variables are freed.
8151 *
8152 * @returns New code buffer offset; throws VBox status code on error.
8153 * @param pReNative The native recompile state.
8154 * @param off The code buffer offset.
8155 * @param   cArgs           The total number of arguments (includes hidden
8156 * count).
8157 * @param cHiddenArgs The number of hidden arguments. The hidden
8158 * arguments must not have any variable declared for
8159 * them, whereas all the regular arguments must
8160 * (tstIEMCheckMc ensures this).
8161 */
8162DECL_HIDDEN_THROW(uint32_t)
8163iemNativeEmitCallCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs, uint8_t cHiddenArgs)
8164{
8165#ifdef VBOX_STRICT
8166 /*
8167 * Assert sanity.
8168 */
8169 Assert(cArgs <= IEMNATIVE_CALL_MAX_ARG_COUNT);
8170 Assert(cHiddenArgs < IEMNATIVE_CALL_ARG_GREG_COUNT);
8171 for (unsigned i = 0; i < cHiddenArgs; i++)
8172 Assert(pReNative->Core.aidxArgVars[i] == UINT8_MAX);
8173 for (unsigned i = cHiddenArgs; i < cArgs; i++)
8174 {
8175 Assert(pReNative->Core.aidxArgVars[i] != UINT8_MAX); /* checked by tstIEMCheckMc.cpp */
8176 Assert(pReNative->Core.bmVars & RT_BIT_32(pReNative->Core.aidxArgVars[i]));
8177 }
8178 iemNativeRegAssertSanity(pReNative);
8179#endif
8180
8181 /* We don't know what the called function makes use of, so flush any pending register writes. */
8182 off = iemNativeRegFlushPendingWrites(pReNative, off);
8183
8184 /*
8185 * Before we do anything else, go over variables that are referenced and
8186 * make sure they are not in a register.
8187 */
8188 uint32_t bmVars = pReNative->Core.bmVars;
8189 if (bmVars)
8190 {
8191 do
8192 {
8193 uint8_t const idxVar = ASMBitFirstSetU32(bmVars) - 1;
8194 bmVars &= ~RT_BIT_32(idxVar);
8195
8196 if (pReNative->Core.aVars[idxVar].idxReferrerVar != UINT8_MAX)
8197 {
8198 uint8_t const idxRegOld = pReNative->Core.aVars[idxVar].idxReg;
8199#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
8200 if ( idxRegOld != UINT8_MAX
8201 && pReNative->Core.aVars[idxVar].fSimdReg)
8202 {
8203 Assert(idxRegOld < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
8204 Assert(pReNative->Core.aVars[idxVar].cbVar == sizeof(RTUINT128U) || pReNative->Core.aVars[idxVar].cbVar == sizeof(RTUINT256U));
8205
8206 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, IEMNATIVE_VAR_IDX_PACK(idxVar));
8207 Log12(("iemNativeEmitCallCommon: spilling idxVar=%d/%#x/idxReg=%d (referred to by %d) onto the stack (slot %#x bp+%d, off=%#x)\n",
8208 idxVar, IEMNATIVE_VAR_IDX_PACK(idxVar), idxRegOld, pReNative->Core.aVars[idxVar].idxReferrerVar,
8209 idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
8210 if (pReNative->Core.aVars[idxVar].cbVar == sizeof(RTUINT128U))
8211 off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
8212 else
8213 off = iemNativeEmitStoreVecRegByBpU256(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
8214
8215 Assert(!( (pReNative->Core.bmGstSimdRegShadowDirtyLo128 | pReNative->Core.bmGstSimdRegShadowDirtyHi128)
8216 & pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows));
8217
8218 pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
8219 pReNative->Core.bmHstSimdRegs &= ~RT_BIT_32(idxRegOld);
8220 pReNative->Core.bmHstSimdRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
8221 pReNative->Core.bmGstSimdRegShadows &= ~pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows;
8222 pReNative->Core.aHstSimdRegs[idxRegOld].fGstRegShadows = 0;
8223 }
8224 else
8225#endif
8226 if (idxRegOld < RT_ELEMENTS(pReNative->Core.aHstRegs))
8227 {
8228 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, IEMNATIVE_VAR_IDX_PACK(idxVar));
8229 Log12(("iemNativeEmitCallCommon: spilling idxVar=%d/%#x/idxReg=%d (referred to by %d) onto the stack (slot %#x bp+%d, off=%#x)\n",
8230 idxVar, IEMNATIVE_VAR_IDX_PACK(idxVar), idxRegOld, pReNative->Core.aVars[idxVar].idxReferrerVar,
8231 idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
8232 off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
8233
8234 pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
8235 pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxRegOld);
8236 pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
8237 pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
8238 pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows = 0;
8239 }
8240 }
8241 } while (bmVars != 0);
8242#if 0 //def VBOX_STRICT
8243 iemNativeRegAssertSanity(pReNative);
8244#endif
8245 }
8246
8247 uint8_t const cRegArgs = RT_MIN(cArgs, RT_ELEMENTS(g_aidxIemNativeCallRegs));
8248
8249 /*
8250 * First, go over the host registers that will be used for arguments and make
8251 * sure they either hold the desired argument or are free.
8252 */
8253 if (pReNative->Core.bmHstRegs & g_afIemNativeCallRegs[cRegArgs])
8254 {
8255 for (uint32_t i = 0; i < cRegArgs; i++)
8256 {
8257 uint8_t const idxArgReg = g_aidxIemNativeCallRegs[i];
8258 if (pReNative->Core.bmHstRegs & RT_BIT_32(idxArgReg))
8259 {
8260 if (pReNative->Core.aHstRegs[idxArgReg].enmWhat == kIemNativeWhat_Var)
8261 {
8262 uint8_t const idxVar = pReNative->Core.aHstRegs[idxArgReg].idxVar;
8263 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
8264 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
8265 Assert(pVar->idxReg == idxArgReg);
8266 uint8_t const uArgNo = pVar->uArgNo;
8267 if (uArgNo == i)
8268                        { /* perfect */ }
8269 /* The variable allocator logic should make sure this is impossible,
8270 except for when the return register is used as a parameter (ARM,
8271 but not x86). */
8272#if RT_BIT_32(IEMNATIVE_CALL_RET_GREG) & IEMNATIVE_CALL_ARGS_GREG_MASK
8273 else if (idxArgReg == IEMNATIVE_CALL_RET_GREG && uArgNo != UINT8_MAX)
8274 {
8275# ifdef IEMNATIVE_FP_OFF_STACK_ARG0
8276# error "Implement this"
8277# endif
8278 Assert(uArgNo < IEMNATIVE_CALL_ARG_GREG_COUNT);
8279 uint8_t const idxFinalArgReg = g_aidxIemNativeCallRegs[uArgNo];
8280 AssertStmt(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxFinalArgReg)),
8281 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_10));
8282 off = iemNativeRegMoveVar(pReNative, off, idxVar, idxArgReg, idxFinalArgReg, "iemNativeEmitCallCommon");
8283 }
8284#endif
8285 else
8286 {
8287 AssertStmt(uArgNo == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_10));
8288
8289 if (pVar->enmKind == kIemNativeVarKind_Stack)
8290 off = iemNativeRegMoveOrSpillStackVar(pReNative, off, idxVar);
8291 else
8292 {
8293 /* just free it, can be reloaded if used again */
8294 pVar->idxReg = UINT8_MAX;
8295 pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxArgReg);
8296 iemNativeRegClearGstRegShadowing(pReNative, idxArgReg, off);
8297 }
8298 }
8299 }
8300 else
8301 AssertStmt(pReNative->Core.aHstRegs[idxArgReg].enmWhat == kIemNativeWhat_Arg,
8302 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_8));
8303 }
8304 }
8305#if 0 //def VBOX_STRICT
8306 iemNativeRegAssertSanity(pReNative);
8307#endif
8308 }
8309
8310 Assert(!(pReNative->Core.bmHstRegs & g_afIemNativeCallRegs[cHiddenArgs])); /* No variables for hidden arguments. */
8311
8312#ifdef IEMNATIVE_FP_OFF_STACK_ARG0
8313 /*
8314 * If there are any stack arguments, make sure they are in their place as well.
8315 *
8316      * We can use IEMNATIVE_CALL_ARG0_GREG as a temporary register since we (or
8317      * the caller) will be loading it later and it must be free (see the first loop).
8318 */
8319 if (cArgs > IEMNATIVE_CALL_ARG_GREG_COUNT)
8320 {
8321 for (unsigned i = IEMNATIVE_CALL_ARG_GREG_COUNT; i < cArgs; i++)
8322 {
8323 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]]; /* unpacked */
8324 int32_t const offBpDisp = g_aoffIemNativeCallStackArgBpDisp[i - IEMNATIVE_CALL_ARG_GREG_COUNT];
8325 if (pVar->idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
8326 {
8327 Assert(pVar->enmKind == kIemNativeVarKind_Stack); /* Imm as well? */
8328 off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, pVar->idxReg);
8329 pReNative->Core.bmHstRegs &= ~RT_BIT_32(pVar->idxReg);
8330 pVar->idxReg = UINT8_MAX;
8331 }
8332 else
8333 {
8334 /* Use ARG0 as temp for stuff we need registers for. */
8335 switch (pVar->enmKind)
8336 {
8337 case kIemNativeVarKind_Stack:
8338 {
8339 uint8_t const idxStackSlot = pVar->idxStackSlot;
8340 AssertStmt(idxStackSlot != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
8341 off = iemNativeEmitLoadGprByBp(pReNative, off, IEMNATIVE_CALL_ARG0_GREG /* is free */,
8342 iemNativeStackCalcBpDisp(idxStackSlot));
8343 off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, IEMNATIVE_CALL_ARG0_GREG);
8344 continue;
8345 }
8346
8347 case kIemNativeVarKind_Immediate:
8348 off = iemNativeEmitStoreImm64ByBp(pReNative, off, offBpDisp, pVar->u.uValue);
8349 continue;
8350
8351 case kIemNativeVarKind_VarRef:
8352 {
8353 uint8_t const idxOtherVar = pVar->u.idxRefVar; /* unpacked */
8354 Assert(idxOtherVar < RT_ELEMENTS(pReNative->Core.aVars));
8355 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, IEMNATIVE_VAR_IDX_PACK(idxOtherVar));
8356 int32_t const offBpDispOther = iemNativeStackCalcBpDisp(idxStackSlot);
8357 uint8_t const idxRegOther = pReNative->Core.aVars[idxOtherVar].idxReg;
8358# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
8359 bool const fSimdReg = pReNative->Core.aVars[idxOtherVar].fSimdReg;
8360 uint8_t const cbVar = pReNative->Core.aVars[idxOtherVar].cbVar;
8361 if ( fSimdReg
8362 && idxRegOther != UINT8_MAX)
8363 {
8364 Assert(idxRegOther < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
8365 if (cbVar == sizeof(RTUINT128U))
8366 off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, offBpDispOther, idxRegOther);
8367 else
8368 off = iemNativeEmitStoreVecRegByBpU256(pReNative, off, offBpDispOther, idxRegOther);
8369 iemNativeSimdRegFreeVar(pReNative, idxRegOther, true); /** @todo const ref? */
8370 Assert(pReNative->Core.aVars[idxOtherVar].idxReg == UINT8_MAX);
8371 }
8372 else
8373# endif
8374 if (idxRegOther < RT_ELEMENTS(pReNative->Core.aHstRegs))
8375 {
8376 off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDispOther, idxRegOther);
8377 iemNativeRegFreeVar(pReNative, idxRegOther, true); /** @todo const ref? */
8378 Assert(pReNative->Core.aVars[idxOtherVar].idxReg == UINT8_MAX);
8379 }
8380 Assert( pReNative->Core.aVars[idxOtherVar].idxStackSlot != UINT8_MAX
8381 && pReNative->Core.aVars[idxOtherVar].idxReg == UINT8_MAX);
8382 off = iemNativeEmitLeaGprByBp(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, offBpDispOther);
8383 off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, IEMNATIVE_CALL_ARG0_GREG);
8384 continue;
8385 }
8386
8387 case kIemNativeVarKind_GstRegRef:
8388 off = iemNativeEmitLeaGprByGstRegRef(pReNative, off, IEMNATIVE_CALL_ARG0_GREG,
8389 pVar->u.GstRegRef.enmClass, pVar->u.GstRegRef.idx);
8390 off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, IEMNATIVE_CALL_ARG0_GREG);
8391 continue;
8392
8393 case kIemNativeVarKind_Invalid:
8394 case kIemNativeVarKind_End:
8395 break;
8396 }
8397 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_3));
8398 }
8399 }
8400# if 0 //def VBOX_STRICT
8401 iemNativeRegAssertSanity(pReNative);
8402# endif
8403 }
8404#else
8405 AssertCompile(IEMNATIVE_CALL_MAX_ARG_COUNT <= IEMNATIVE_CALL_ARG_GREG_COUNT);
8406#endif
8407
8408 /*
8409 * Make sure the argument variables are loaded into their respective registers.
8410 *
8411 * We can optimize this by ASSUMING that any register allocations are for
8412      * registers that have already been loaded and are ready. The previous step
8413 * saw to that.
8414 */
8415 if (~pReNative->Core.bmHstRegs & (g_afIemNativeCallRegs[cRegArgs] & ~g_afIemNativeCallRegs[cHiddenArgs]))
8416 {
8417 for (unsigned i = cHiddenArgs; i < cRegArgs; i++)
8418 {
8419 uint8_t const idxArgReg = g_aidxIemNativeCallRegs[i];
8420 if (pReNative->Core.bmHstRegs & RT_BIT_32(idxArgReg))
8421 Assert( pReNative->Core.aHstRegs[idxArgReg].idxVar == IEMNATIVE_VAR_IDX_PACK(pReNative->Core.aidxArgVars[i])
8422 && pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]].uArgNo == i
8423 && pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]].idxReg == idxArgReg);
8424 else
8425 {
8426 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]]; /* unpacked */
8427 if (pVar->idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
8428 {
8429 Assert(pVar->enmKind == kIemNativeVarKind_Stack);
8430 off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxArgReg, pVar->idxReg);
8431 pReNative->Core.bmHstRegs = (pReNative->Core.bmHstRegs & ~RT_BIT_32(pVar->idxReg))
8432 | RT_BIT_32(idxArgReg);
8433 pVar->idxReg = idxArgReg;
8434 }
8435 else
8436 {
8437 /* Use ARG0 as temp for stuff we need registers for. */
8438 switch (pVar->enmKind)
8439 {
8440 case kIemNativeVarKind_Stack:
8441 {
8442 uint8_t const idxStackSlot = pVar->idxStackSlot;
8443 AssertStmt(idxStackSlot != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
8444 off = iemNativeEmitLoadGprByBp(pReNative, off, idxArgReg, iemNativeStackCalcBpDisp(idxStackSlot));
8445 continue;
8446 }
8447
8448 case kIemNativeVarKind_Immediate:
8449 off = iemNativeEmitLoadGprImm64(pReNative, off, idxArgReg, pVar->u.uValue);
8450 continue;
8451
8452 case kIemNativeVarKind_VarRef:
8453 {
8454 uint8_t const idxOtherVar = pVar->u.idxRefVar; /* unpacked */
8455 Assert(idxOtherVar < RT_ELEMENTS(pReNative->Core.aVars));
8456 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative,
8457 IEMNATIVE_VAR_IDX_PACK(idxOtherVar));
8458 int32_t const offBpDispOther = iemNativeStackCalcBpDisp(idxStackSlot);
8459 uint8_t const idxRegOther = pReNative->Core.aVars[idxOtherVar].idxReg;
8460#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
8461 bool const fSimdReg = pReNative->Core.aVars[idxOtherVar].fSimdReg;
8462 uint8_t const cbVar = pReNative->Core.aVars[idxOtherVar].cbVar;
8463 if ( fSimdReg
8464 && idxRegOther != UINT8_MAX)
8465 {
8466 Assert(idxRegOther < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
8467 if (cbVar == sizeof(RTUINT128U))
8468 off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, offBpDispOther, idxRegOther);
8469 else
8470 off = iemNativeEmitStoreVecRegByBpU256(pReNative, off, offBpDispOther, idxRegOther);
8471 iemNativeSimdRegFreeVar(pReNative, idxRegOther, true); /** @todo const ref? */
8472 Assert(pReNative->Core.aVars[idxOtherVar].idxReg == UINT8_MAX);
8473 }
8474 else
8475#endif
8476 if (idxRegOther < RT_ELEMENTS(pReNative->Core.aHstRegs))
8477 {
8478 off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDispOther, idxRegOther);
8479 iemNativeRegFreeVar(pReNative, idxRegOther, true); /** @todo const ref? */
8480 Assert(pReNative->Core.aVars[idxOtherVar].idxReg == UINT8_MAX);
8481 }
8482 Assert( pReNative->Core.aVars[idxOtherVar].idxStackSlot != UINT8_MAX
8483 && pReNative->Core.aVars[idxOtherVar].idxReg == UINT8_MAX);
8484 off = iemNativeEmitLeaGprByBp(pReNative, off, idxArgReg, offBpDispOther);
8485 continue;
8486 }
8487
8488 case kIemNativeVarKind_GstRegRef:
8489 off = iemNativeEmitLeaGprByGstRegRef(pReNative, off, idxArgReg,
8490 pVar->u.GstRegRef.enmClass, pVar->u.GstRegRef.idx);
8491 continue;
8492
8493 case kIemNativeVarKind_Invalid:
8494 case kIemNativeVarKind_End:
8495 break;
8496 }
8497 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_3));
8498 }
8499 }
8500 }
8501#if 0 //def VBOX_STRICT
8502 iemNativeRegAssertSanity(pReNative);
8503#endif
8504 }
8505#ifdef VBOX_STRICT
8506 else
8507 for (unsigned i = cHiddenArgs; i < cRegArgs; i++)
8508 {
8509 Assert(pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]].uArgNo == i);
8510 Assert(pReNative->Core.aVars[pReNative->Core.aidxArgVars[i]].idxReg == g_aidxIemNativeCallRegs[i]);
8511 }
8512#endif
8513
8514 /*
8515 * Free all argument variables (simplified).
8516 * Their lifetime always expires with the call they are for.
8517 */
8518 /** @todo Make the python script check that arguments aren't used after
8519 * IEM_MC_CALL_XXXX. */
8520     /** @todo There is a special case with IEM_MC_MEM_MAP_U16_RW and friends requiring
8521      *        an IEM_MC_MEM_COMMIT_AND_UNMAP_RW after an AIMPL call, typically with
8522      *        an argument value. There is also some FPU stuff. */
8523 for (uint32_t i = cHiddenArgs; i < cArgs; i++)
8524 {
8525 uint8_t const idxVar = pReNative->Core.aidxArgVars[i]; /* unpacked */
8526 Assert(idxVar < RT_ELEMENTS(pReNative->Core.aVars));
8527
8528 /* no need to free registers: */
8529 AssertMsg(i < IEMNATIVE_CALL_ARG_GREG_COUNT
8530 ? pReNative->Core.aVars[idxVar].idxReg == g_aidxIemNativeCallRegs[i]
8531 || pReNative->Core.aVars[idxVar].idxReg == UINT8_MAX
8532 : pReNative->Core.aVars[idxVar].idxReg == UINT8_MAX,
8533 ("i=%d idxVar=%d idxReg=%d, expected %d\n", i, idxVar, pReNative->Core.aVars[idxVar].idxReg,
8534 i < IEMNATIVE_CALL_ARG_GREG_COUNT ? g_aidxIemNativeCallRegs[i] : UINT8_MAX));
8535
8536 pReNative->Core.aidxArgVars[i] = UINT8_MAX;
8537 pReNative->Core.bmVars &= ~RT_BIT_32(idxVar);
8538 iemNativeVarFreeStackSlots(pReNative, idxVar);
8539 }
8540 Assert(pReNative->Core.u64ArgVars == UINT64_MAX);
8541
8542 /*
8543 * Flush volatile registers as we make the call.
8544 */
8545 off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, cRegArgs);
8546
8547 return off;
8548}
8549
8550
8551
8552/*********************************************************************************************************************************
8553* TLB Lookup. *
8554*********************************************************************************************************************************/
8555
8556/**
8557 * This is called via iemNativeHlpAsmSafeWrapCheckTlbLookup.
8558 */
8559DECLASM(void) iemNativeHlpCheckTlbLookup(PVMCPU pVCpu, uintptr_t uResult, uint64_t GCPtr, uint32_t uSegAndSizeAndAccess)
8560{
8561 uint8_t const iSegReg = RT_BYTE1(uSegAndSizeAndAccess);
8562 uint8_t const cbMem = RT_BYTE2(uSegAndSizeAndAccess);
8563 uint32_t const fAccess = uSegAndSizeAndAccess >> 16;
8564 Log(("iemNativeHlpCheckTlbLookup: %x:%#RX64 LB %#x fAccess=%#x -> %#RX64\n", iSegReg, GCPtr, cbMem, fAccess, uResult));
8565
8566 /* Do the lookup manually. */
8567 RTGCPTR const GCPtrFlat = iSegReg == UINT8_MAX ? GCPtr : GCPtr + pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
8568 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrFlat);
8569 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
8570 if (RT_LIKELY(pTlbe->uTag == uTag))
8571 {
8572 /*
8573 * Check TLB page table level access flags.
8574 */
8575 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
8576 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
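        /* Note: the line above relies on IEMTLBE_F_PT_NO_USER being bit 2 (value 4,
           see the AssertCompile), so ring 3 yields (3 + 1) & 4 = 4 while rings 0
           thru 2 yield 0.  A branchy equivalent, purely as an illustrative sketch:
               uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0; */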
8577 uint64_t const fNoWriteNoDirty = !(fAccess & IEM_ACCESS_TYPE_WRITE) ? 0
8578 : IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PG_NO_WRITE;
8579 uint64_t const fFlagsAndPhysRev = pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
8580 | IEMTLBE_F_PG_UNASSIGNED
8581 | IEMTLBE_F_PT_NO_ACCESSED
8582 | fNoWriteNoDirty | fNoUser);
8583 uint64_t const uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev;
8584 if (RT_LIKELY(fFlagsAndPhysRev == uTlbPhysRev))
8585 {
8586 /*
8587 * Return the address.
8588 */
8589 uint8_t const * const pbAddr = &pTlbe->pbMappingR3[GCPtrFlat & GUEST_PAGE_OFFSET_MASK];
8590 if ((uintptr_t)pbAddr == uResult)
8591 return;
8592 RT_NOREF(cbMem);
8593 AssertFailed();
8594 }
8595 else
8596 AssertMsgFailed(("fFlagsAndPhysRev=%#RX64 vs uTlbPhysRev=%#RX64: %#RX64\n",
8597 fFlagsAndPhysRev, uTlbPhysRev, fFlagsAndPhysRev ^ uTlbPhysRev));
8598 }
8599 else
8600 AssertFailed();
8601 RT_BREAKPOINT();
8602}
8603
8604/* The rest of the code is in IEMN8veRecompilerTlbLookup.h. */
8605
8606
8607
8608/*********************************************************************************************************************************
8609* Recompiler Core. *
8610*********************************************************************************************************************************/
8611
8612/** @callback_method_impl{FNDISREADBYTES, Dummy.} */
8613static DECLCALLBACK(int) iemNativeDisasReadBytesDummy(PDISSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
8614{
8615 RT_BZERO(&pDis->Instr.ab[offInstr], cbMaxRead);
8616 pDis->cbCachedInstr += cbMaxRead;
8617 RT_NOREF(cbMinRead);
8618 return VERR_NO_DATA;
8619}
8620
8621
8622DECLHIDDEN(const char *) iemNativeDbgVCpuOffsetToName(uint32_t off)
8623{
8624 static struct { uint32_t off; const char *pszName; } const s_aMembers[] =
8625 {
8626#define ENTRY(a_Member) { (uint32_t)RT_UOFFSETOF(VMCPUCC, a_Member), #a_Member } /* cast is for stupid MSC */
8627 ENTRY(fLocalForcedActions),
8628 ENTRY(iem.s.rcPassUp),
8629 ENTRY(iem.s.fExec),
8630 ENTRY(iem.s.pbInstrBuf),
8631 ENTRY(iem.s.uInstrBufPc),
8632 ENTRY(iem.s.GCPhysInstrBuf),
8633 ENTRY(iem.s.cbInstrBufTotal),
8634 ENTRY(iem.s.idxTbCurInstr),
8635#ifdef VBOX_WITH_STATISTICS
8636 ENTRY(iem.s.StatNativeTlbHitsForFetch),
8637 ENTRY(iem.s.StatNativeTlbHitsForStore),
8638 ENTRY(iem.s.StatNativeTlbHitsForStack),
8639 ENTRY(iem.s.StatNativeTlbHitsForMapped),
8640 ENTRY(iem.s.StatNativeCodeTlbMissesNewPage),
8641 ENTRY(iem.s.StatNativeCodeTlbHitsForNewPage),
8642 ENTRY(iem.s.StatNativeCodeTlbMissesNewPageWithOffset),
8643 ENTRY(iem.s.StatNativeCodeTlbHitsForNewPageWithOffset),
8644#endif
8645 ENTRY(iem.s.DataTlb.aEntries),
8646 ENTRY(iem.s.DataTlb.uTlbRevision),
8647 ENTRY(iem.s.DataTlb.uTlbPhysRev),
8648 ENTRY(iem.s.DataTlb.cTlbHits),
8649 ENTRY(iem.s.CodeTlb.aEntries),
8650 ENTRY(iem.s.CodeTlb.uTlbRevision),
8651 ENTRY(iem.s.CodeTlb.uTlbPhysRev),
8652 ENTRY(iem.s.CodeTlb.cTlbHits),
8653 ENTRY(pVMR3),
8654 ENTRY(cpum.GstCtx.rax),
8655 ENTRY(cpum.GstCtx.ah),
8656 ENTRY(cpum.GstCtx.rcx),
8657 ENTRY(cpum.GstCtx.ch),
8658 ENTRY(cpum.GstCtx.rdx),
8659 ENTRY(cpum.GstCtx.dh),
8660 ENTRY(cpum.GstCtx.rbx),
8661 ENTRY(cpum.GstCtx.bh),
8662 ENTRY(cpum.GstCtx.rsp),
8663 ENTRY(cpum.GstCtx.rbp),
8664 ENTRY(cpum.GstCtx.rsi),
8665 ENTRY(cpum.GstCtx.rdi),
8666 ENTRY(cpum.GstCtx.r8),
8667 ENTRY(cpum.GstCtx.r9),
8668 ENTRY(cpum.GstCtx.r10),
8669 ENTRY(cpum.GstCtx.r11),
8670 ENTRY(cpum.GstCtx.r12),
8671 ENTRY(cpum.GstCtx.r13),
8672 ENTRY(cpum.GstCtx.r14),
8673 ENTRY(cpum.GstCtx.r15),
8674 ENTRY(cpum.GstCtx.es.Sel),
8675 ENTRY(cpum.GstCtx.es.u64Base),
8676 ENTRY(cpum.GstCtx.es.u32Limit),
8677 ENTRY(cpum.GstCtx.es.Attr),
8678 ENTRY(cpum.GstCtx.cs.Sel),
8679 ENTRY(cpum.GstCtx.cs.u64Base),
8680 ENTRY(cpum.GstCtx.cs.u32Limit),
8681 ENTRY(cpum.GstCtx.cs.Attr),
8682 ENTRY(cpum.GstCtx.ss.Sel),
8683 ENTRY(cpum.GstCtx.ss.u64Base),
8684 ENTRY(cpum.GstCtx.ss.u32Limit),
8685 ENTRY(cpum.GstCtx.ss.Attr),
8686 ENTRY(cpum.GstCtx.ds.Sel),
8687 ENTRY(cpum.GstCtx.ds.u64Base),
8688 ENTRY(cpum.GstCtx.ds.u32Limit),
8689 ENTRY(cpum.GstCtx.ds.Attr),
8690 ENTRY(cpum.GstCtx.fs.Sel),
8691 ENTRY(cpum.GstCtx.fs.u64Base),
8692 ENTRY(cpum.GstCtx.fs.u32Limit),
8693 ENTRY(cpum.GstCtx.fs.Attr),
8694 ENTRY(cpum.GstCtx.gs.Sel),
8695 ENTRY(cpum.GstCtx.gs.u64Base),
8696 ENTRY(cpum.GstCtx.gs.u32Limit),
8697 ENTRY(cpum.GstCtx.gs.Attr),
8698 ENTRY(cpum.GstCtx.rip),
8699 ENTRY(cpum.GstCtx.eflags),
8700 ENTRY(cpum.GstCtx.uRipInhibitInt),
8701 ENTRY(cpum.GstCtx.cr0),
8702 ENTRY(cpum.GstCtx.cr4),
8703 ENTRY(cpum.GstCtx.aXcr[0]),
8704 ENTRY(cpum.GstCtx.aXcr[1]),
8705#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
8706 ENTRY(cpum.GstCtx.XState.x87.aXMM[0]),
8707 ENTRY(cpum.GstCtx.XState.x87.aXMM[1]),
8708 ENTRY(cpum.GstCtx.XState.x87.aXMM[2]),
8709 ENTRY(cpum.GstCtx.XState.x87.aXMM[3]),
8710 ENTRY(cpum.GstCtx.XState.x87.aXMM[4]),
8711 ENTRY(cpum.GstCtx.XState.x87.aXMM[5]),
8712 ENTRY(cpum.GstCtx.XState.x87.aXMM[6]),
8713 ENTRY(cpum.GstCtx.XState.x87.aXMM[7]),
8714 ENTRY(cpum.GstCtx.XState.x87.aXMM[8]),
8715 ENTRY(cpum.GstCtx.XState.x87.aXMM[9]),
8716 ENTRY(cpum.GstCtx.XState.x87.aXMM[10]),
8717 ENTRY(cpum.GstCtx.XState.x87.aXMM[11]),
8718 ENTRY(cpum.GstCtx.XState.x87.aXMM[12]),
8719 ENTRY(cpum.GstCtx.XState.x87.aXMM[13]),
8720 ENTRY(cpum.GstCtx.XState.x87.aXMM[14]),
8721 ENTRY(cpum.GstCtx.XState.x87.aXMM[15]),
8722 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[0]),
8723 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[1]),
8724 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[2]),
8725 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[3]),
8726 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[4]),
8727 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[5]),
8728 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[6]),
8729 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[7]),
8730 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[8]),
8731 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[9]),
8732 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[10]),
8733 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[11]),
8734 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[12]),
8735 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[13]),
8736 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[14]),
8737 ENTRY(cpum.GstCtx.XState.u.YmmHi.aYmmHi[15])
8738#endif
8739#undef ENTRY
8740 };
8741#ifdef VBOX_STRICT
8742 static bool s_fOrderChecked = false;
8743 if (!s_fOrderChecked)
8744 {
8745 s_fOrderChecked = true;
8746 uint32_t offPrev = s_aMembers[0].off;
8747 for (unsigned i = 1; i < RT_ELEMENTS(s_aMembers); i++)
8748 {
8749 Assert(s_aMembers[i].off > offPrev);
8750 offPrev = s_aMembers[i].off;
8751 }
8752 }
8753#endif
8754
8755 /*
8756 * Binary lookup.
8757 */
8758 unsigned iStart = 0;
8759 unsigned iEnd = RT_ELEMENTS(s_aMembers);
8760 for (;;)
8761 {
8762 unsigned const iCur = iStart + (iEnd - iStart) / 2;
8763 uint32_t const offCur = s_aMembers[iCur].off;
8764 if (off < offCur)
8765 {
8766 if (iCur != iStart)
8767 iEnd = iCur;
8768 else
8769 break;
8770 }
8771 else if (off > offCur)
8772 {
8773 if (iCur + 1 < iEnd)
8774 iStart = iCur + 1;
8775 else
8776 break;
8777 }
8778 else
8779 return s_aMembers[iCur].pszName;
8780 }
8781#ifdef VBOX_WITH_STATISTICS
8782 if (off - RT_UOFFSETOF(VMCPUCC, iem.s.acThreadedFuncStats) < RT_SIZEOFMEMB(VMCPUCC, iem.s.acThreadedFuncStats))
8783 return "iem.s.acThreadedFuncStats[iFn]";
8784#endif
8785 return NULL;
8786}
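/* Usage sketch (illustrative only): given the displacement of a context member, e.g.
       iemNativeDbgVCpuOffsetToName(RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.rip))
   the function returns the string "cpum.GstCtx.rip"; offsets that hit no table entry
   (and fall outside the threaded function stats range) return NULL, so the callers
   below simply omit the annotation. */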
8787
8788
8789DECLHIDDEN(void) iemNativeDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT
8790{
8791 AssertReturnVoid((pTb->fFlags & IEMTB_F_TYPE_MASK) == IEMTB_F_TYPE_NATIVE);
8792#if defined(RT_ARCH_AMD64)
8793 static const char * const a_apszMarkers[] =
8794 {
8795 /*[0]=*/ "unknown0", "CheckCsLim", "ConsiderLimChecking", "CheckOpcodes",
8796 /*[4]=*/ "PcAfterBranch", "LoadTlbForNewPage", "LoadTlbAfterBranch"
8797 };
8798#endif
8799
8800 char szDisBuf[512];
8801 DISSTATE Dis;
8802 PCIEMNATIVEINSTR const paNative = pTb->Native.paInstructions;
8803 uint32_t const cNative = pTb->Native.cInstructions;
8804 uint32_t offNative = 0;
8805#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
8806 PCIEMTBDBG const pDbgInfo = pTb->pDbgInfo;
8807#endif
8808 DISCPUMODE enmGstCpuMode = (pTb->fFlags & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT ? DISCPUMODE_16BIT
8809 : (pTb->fFlags & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT ? DISCPUMODE_32BIT
8810 : DISCPUMODE_64BIT;
8811#if defined(RT_ARCH_AMD64) && !defined(VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER)
8812 DISCPUMODE const enmHstCpuMode = DISCPUMODE_64BIT;
8813#elif defined(RT_ARCH_ARM64) && !defined(VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER)
8814 DISCPUMODE const enmHstCpuMode = DISCPUMODE_ARMV8_A64;
8815#elif !defined(VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER)
8816# error "Port me"
8817#else
8818 csh hDisasm = ~(size_t)0;
8819# if defined(RT_ARCH_AMD64)
8820 cs_err rcCs = cs_open(CS_ARCH_X86, CS_MODE_LITTLE_ENDIAN | CS_MODE_64, &hDisasm);
8821# elif defined(RT_ARCH_ARM64)
8822 cs_err rcCs = cs_open(CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN, &hDisasm);
8823# else
8824# error "Port me"
8825# endif
8826 AssertMsgReturnVoid(rcCs == CS_ERR_OK, ("%d (%#x)\n", rcCs, rcCs));
8827
8828 //rcCs = cs_option(hDisasm, CS_OPT_DETAIL, CS_OPT_ON); - not needed as pInstr->detail doesn't provide full memory detail.
8829 //Assert(rcCs == CS_ERR_OK);
8830#endif
8831
8832 /*
8833 * Print TB info.
8834 */
8835 pHlp->pfnPrintf(pHlp,
8836 "pTb=%p: GCPhysPc=%RGp cInstructions=%u LB %#x cRanges=%u\n"
8837 "pTb=%p: cUsed=%u msLastUsed=%u fFlags=%#010x %s\n",
8838 pTb, pTb->GCPhysPc, pTb->cInstructions, pTb->cbOpcodes, pTb->cRanges,
8839 pTb, pTb->cUsed, pTb->msLastUsed, pTb->fFlags, iemTbFlagsToString(pTb->fFlags, szDisBuf, sizeof(szDisBuf)));
8840#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
8841 if (pDbgInfo && pDbgInfo->cEntries > 1)
8842 {
8843 Assert(pDbgInfo->aEntries[0].Gen.uType == kIemTbDbgEntryType_NativeOffset);
8844
8845 /*
8846          * This disassembly is driven by the debug info which follows the native
8847          * code and indicates where the next guest instruction starts, where
8848          * labels are, and other such things.
8849 */
8850 uint32_t idxThreadedCall = 0;
8851 uint32_t fExec = pTb->fFlags & UINT32_C(0x00ffffff);
8852 uint8_t idxRange = UINT8_MAX;
8853 uint8_t const cRanges = RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges));
8854 uint32_t offRange = 0;
8855 uint32_t offOpcodes = 0;
8856 uint32_t const cbOpcodes = pTb->cbOpcodes;
8857 RTGCPHYS GCPhysPc = pTb->GCPhysPc;
8858 uint32_t const cDbgEntries = pDbgInfo->cEntries;
8859 uint32_t iDbgEntry = 1;
8860 uint32_t offDbgNativeNext = pDbgInfo->aEntries[0].NativeOffset.offNative;
8861
8862 while (offNative < cNative)
8863 {
8864 /* If we're at or have passed the point where the next chunk of debug
8865 info starts, process it. */
8866 if (offDbgNativeNext <= offNative)
8867 {
8868 offDbgNativeNext = UINT32_MAX;
8869 for (; iDbgEntry < cDbgEntries; iDbgEntry++)
8870 {
8871 switch (pDbgInfo->aEntries[iDbgEntry].Gen.uType)
8872 {
8873 case kIemTbDbgEntryType_GuestInstruction:
8874 {
8875 /* Did the exec flag change? */
8876 if (fExec != pDbgInfo->aEntries[iDbgEntry].GuestInstruction.fExec)
8877 {
8878 pHlp->pfnPrintf(pHlp,
8879 " fExec change %#08x -> %#08x %s\n",
8880 fExec, pDbgInfo->aEntries[iDbgEntry].GuestInstruction.fExec,
8881 iemTbFlagsToString(pDbgInfo->aEntries[iDbgEntry].GuestInstruction.fExec,
8882 szDisBuf, sizeof(szDisBuf)));
8883 fExec = pDbgInfo->aEntries[iDbgEntry].GuestInstruction.fExec;
8884 enmGstCpuMode = (fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT ? DISCPUMODE_16BIT
8885 : (fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT ? DISCPUMODE_32BIT
8886 : DISCPUMODE_64BIT;
8887 }
8888
8889                             /* New opcode range? We need to fend off a spurious debug info entry here for cases
8890 where the compilation was aborted before the opcode was recorded and the actual
8891 instruction was translated to a threaded call. This may happen when we run out
8892 of ranges, or when some complicated interrupts/FFs are found to be pending or
8893 similar. So, we just deal with it here rather than in the compiler code as it
8894 is a lot simpler to do here. */
8895 if ( idxRange == UINT8_MAX
8896 || idxRange >= cRanges
8897 || offRange >= pTb->aRanges[idxRange].cbOpcodes)
8898 {
8899 idxRange += 1;
8900 if (idxRange < cRanges)
8901 offRange = !idxRange ? 0 : offRange - pTb->aRanges[idxRange - 1].cbOpcodes;
8902 else
8903 continue;
8904 Assert(offOpcodes == pTb->aRanges[idxRange].offOpcodes + offRange);
8905 GCPhysPc = pTb->aRanges[idxRange].offPhysPage
8906 + (pTb->aRanges[idxRange].idxPhysPage == 0
8907 ? pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK
8908 : pTb->aGCPhysPages[pTb->aRanges[idxRange].idxPhysPage - 1]);
8909 pHlp->pfnPrintf(pHlp, " Range #%u: GCPhysPc=%RGp LB %#x [idxPg=%d]\n",
8910 idxRange, GCPhysPc, pTb->aRanges[idxRange].cbOpcodes,
8911 pTb->aRanges[idxRange].idxPhysPage);
8912 GCPhysPc += offRange;
8913 }
8914
8915 /* Disassemble the instruction. */
8916 //uint8_t const cbInstrMax = RT_MIN(pTb->aRanges[idxRange].cbOpcodes - offRange, 15);
8917 uint8_t const cbInstrMax = RT_MIN(cbOpcodes - offOpcodes, 15);
8918 uint32_t cbInstr = 1;
8919 int rc = DISInstrWithPrefetchedBytes(GCPhysPc, enmGstCpuMode, DISOPTYPE_ALL,
8920 &pTb->pabOpcodes[offOpcodes], cbInstrMax,
8921 iemNativeDisasReadBytesDummy, NULL, &Dis, &cbInstr);
8922 if (RT_SUCCESS(rc))
8923 {
8924 size_t cch = DISFormatYasmEx(&Dis, szDisBuf, sizeof(szDisBuf),
8925 DIS_FMT_FLAGS_BYTES_WIDTH_MAKE(10) | DIS_FMT_FLAGS_BYTES_LEFT
8926 | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
8927 NULL /*pfnGetSymbol*/, NULL /*pvUser*/);
8928
8929 static unsigned const s_offMarker = 55;
8930 static char const s_szMarker[] = " ; <--- guest";
8931 if (cch < s_offMarker)
8932 {
8933 memset(&szDisBuf[cch], ' ', s_offMarker - cch);
8934 cch = s_offMarker;
8935 }
8936 if (cch + sizeof(s_szMarker) <= sizeof(szDisBuf))
8937 memcpy(&szDisBuf[cch], s_szMarker, sizeof(s_szMarker));
8938
8939 pHlp->pfnPrintf(pHlp, " %%%%%RGp: %s\n", GCPhysPc, szDisBuf);
8940 }
8941 else
8942 {
8943 pHlp->pfnPrintf(pHlp, " %%%%%RGp: %.*Rhxs - guest disassembly failure %Rrc\n",
8944 GCPhysPc, cbInstrMax, &pTb->pabOpcodes[offOpcodes], rc);
8945 cbInstr = 1;
8946 }
8947 GCPhysPc += cbInstr;
8948 offOpcodes += cbInstr;
8949 offRange += cbInstr;
8950 continue;
8951 }
8952
8953 case kIemTbDbgEntryType_ThreadedCall:
8954 pHlp->pfnPrintf(pHlp,
8955 " Call #%u to %s (%u args) - %s\n",
8956 idxThreadedCall,
8957 g_apszIemThreadedFunctions[pDbgInfo->aEntries[iDbgEntry].ThreadedCall.enmCall],
8958 g_acIemThreadedFunctionUsedArgs[pDbgInfo->aEntries[iDbgEntry].ThreadedCall.enmCall],
8959 pDbgInfo->aEntries[iDbgEntry].ThreadedCall.fRecompiled ? "recompiled" : "todo");
8960 idxThreadedCall++;
8961 continue;
8962
8963 case kIemTbDbgEntryType_GuestRegShadowing:
8964 {
8965 PCIEMTBDBGENTRY const pEntry = &pDbgInfo->aEntries[iDbgEntry];
8966 const char * const pszGstReg = g_aGstShadowInfo[pEntry->GuestRegShadowing.idxGstReg].pszName;
8967 if (pEntry->GuestRegShadowing.idxHstReg == UINT8_MAX)
8968 pHlp->pfnPrintf(pHlp, " Guest register %s != host register %s\n", pszGstReg,
8969 g_apszIemNativeHstRegNames[pEntry->GuestRegShadowing.idxHstRegPrev]);
8970 else if (pEntry->GuestRegShadowing.idxHstRegPrev == UINT8_MAX)
8971 pHlp->pfnPrintf(pHlp, " Guest register %s == host register %s\n", pszGstReg,
8972 g_apszIemNativeHstRegNames[pEntry->GuestRegShadowing.idxHstReg]);
8973 else
8974 pHlp->pfnPrintf(pHlp, " Guest register %s == host register %s (previously in %s)\n", pszGstReg,
8975 g_apszIemNativeHstRegNames[pEntry->GuestRegShadowing.idxHstReg],
8976 g_apszIemNativeHstRegNames[pEntry->GuestRegShadowing.idxHstRegPrev]);
8977 continue;
8978 }
8979
8980#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
8981 case kIemTbDbgEntryType_GuestSimdRegShadowing:
8982 {
8983 PCIEMTBDBGENTRY const pEntry = &pDbgInfo->aEntries[iDbgEntry];
8984 const char * const pszGstReg = g_aGstSimdShadowInfo[pEntry->GuestSimdRegShadowing.idxGstSimdReg].pszName;
8985 if (pEntry->GuestSimdRegShadowing.idxHstSimdReg == UINT8_MAX)
8986 pHlp->pfnPrintf(pHlp, " Guest SIMD register %s != host SIMD register %s\n", pszGstReg,
8987 g_apszIemNativeHstSimdRegNames[pEntry->GuestSimdRegShadowing.idxHstSimdRegPrev]);
8988 else if (pEntry->GuestSimdRegShadowing.idxHstSimdRegPrev == UINT8_MAX)
8989 pHlp->pfnPrintf(pHlp, " Guest SIMD register %s == host SIMD register %s\n", pszGstReg,
8990 g_apszIemNativeHstSimdRegNames[pEntry->GuestSimdRegShadowing.idxHstSimdReg]);
8991 else
8992 pHlp->pfnPrintf(pHlp, " Guest SIMD register %s == host SIMD register %s (previously in %s)\n", pszGstReg,
8993 g_apszIemNativeHstSimdRegNames[pEntry->GuestSimdRegShadowing.idxHstSimdReg],
8994 g_apszIemNativeHstSimdRegNames[pEntry->GuestSimdRegShadowing.idxHstSimdRegPrev]);
8995 continue;
8996 }
8997#endif
8998
8999 case kIemTbDbgEntryType_Label:
9000 {
9001 const char *pszName = "what_the_fudge";
9002 const char *pszComment = "";
9003 bool fNumbered = pDbgInfo->aEntries[iDbgEntry].Label.uData != 0;
9004 switch ((IEMNATIVELABELTYPE)pDbgInfo->aEntries[iDbgEntry].Label.enmLabel)
9005 {
9006 case kIemNativeLabelType_Return: pszName = "Return"; break;
9007 case kIemNativeLabelType_ReturnBreak: pszName = "ReturnBreak"; break;
9008 case kIemNativeLabelType_ReturnWithFlags: pszName = "ReturnWithFlags"; break;
9009 case kIemNativeLabelType_NonZeroRetOrPassUp: pszName = "NonZeroRetOrPassUp"; break;
9010 case kIemNativeLabelType_RaiseDe: pszName = "RaiseDe"; break;
9011 case kIemNativeLabelType_RaiseUd: pszName = "RaiseUd"; break;
9012 case kIemNativeLabelType_RaiseSseRelated: pszName = "RaiseSseRelated"; break;
9013 case kIemNativeLabelType_RaiseAvxRelated: pszName = "RaiseAvxRelated"; break;
9014 case kIemNativeLabelType_RaiseNm: pszName = "RaiseNm"; break;
9015 case kIemNativeLabelType_RaiseGp0: pszName = "RaiseGp0"; break;
9016 case kIemNativeLabelType_RaiseMf: pszName = "RaiseMf"; break;
9017 case kIemNativeLabelType_RaiseXf: pszName = "RaiseXf"; break;
9018 case kIemNativeLabelType_ObsoleteTb: pszName = "ObsoleteTb"; break;
9019 case kIemNativeLabelType_NeedCsLimChecking: pszName = "NeedCsLimChecking"; break;
9020 case kIemNativeLabelType_CheckBranchMiss: pszName = "CheckBranchMiss"; break;
9021 case kIemNativeLabelType_If:
9022 pszName = "If";
9023 fNumbered = true;
9024 break;
9025 case kIemNativeLabelType_Else:
9026 pszName = "Else";
9027 fNumbered = true;
9028 pszComment = " ; regs state restored pre-if-block";
9029 break;
9030 case kIemNativeLabelType_Endif:
9031 pszName = "Endif";
9032 fNumbered = true;
9033 break;
9034 case kIemNativeLabelType_CheckIrq:
9035 pszName = "CheckIrq_CheckVM";
9036 fNumbered = true;
9037 break;
9038 case kIemNativeLabelType_TlbLookup:
9039 pszName = "TlbLookup";
9040 fNumbered = true;
9041 break;
9042 case kIemNativeLabelType_TlbMiss:
9043 pszName = "TlbMiss";
9044 fNumbered = true;
9045 break;
9046 case kIemNativeLabelType_TlbDone:
9047 pszName = "TlbDone";
9048 fNumbered = true;
9049 break;
9050 case kIemNativeLabelType_Invalid:
9051 case kIemNativeLabelType_End:
9052 break;
9053 }
9054 if (fNumbered)
9055 pHlp->pfnPrintf(pHlp, " %s_%u:%s\n", pszName, pDbgInfo->aEntries[iDbgEntry].Label.uData, pszComment);
9056 else
9057 pHlp->pfnPrintf(pHlp, " %s:\n", pszName);
9058 continue;
9059 }
9060
9061 case kIemTbDbgEntryType_NativeOffset:
9062 offDbgNativeNext = pDbgInfo->aEntries[iDbgEntry].NativeOffset.offNative;
9063 Assert(offDbgNativeNext > offNative);
9064 break;
9065
9066#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
9067 case kIemTbDbgEntryType_DelayedPcUpdate:
9068 pHlp->pfnPrintf(pHlp,
9069 " Updating guest PC value by %u (cInstrSkipped=%u)\n",
9070 pDbgInfo->aEntries[iDbgEntry].DelayedPcUpdate.offPc,
9071 pDbgInfo->aEntries[iDbgEntry].DelayedPcUpdate.cInstrSkipped);
9072 continue;
9073#endif
9074
9075 default:
9076 AssertFailed();
9077 }
9078 iDbgEntry++;
9079 break;
9080 }
9081 }
9082
9083 /*
9084 * Disassemble the next native instruction.
9085 */
9086 PCIEMNATIVEINSTR const pNativeCur = &paNative[offNative];
9087# ifndef VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER
9088 uint32_t cbInstr = sizeof(paNative[0]);
9089 int const rc = DISInstr(pNativeCur, enmHstCpuMode, &Dis, &cbInstr);
9090 if (RT_SUCCESS(rc))
9091 {
9092# if defined(RT_ARCH_AMD64)
9093 if (Dis.pCurInstr->uOpcode == OP_NOP && cbInstr == 7) /* iemNativeEmitMarker */
9094 {
9095 uint32_t const uInfo = *(uint32_t const *)&Dis.Instr.ab[3];
9096 if (RT_HIWORD(uInfo) < kIemThreadedFunc_End)
9097 pHlp->pfnPrintf(pHlp, " %p: nop ; marker: call #%u to %s (%u args) - %s\n",
9098 pNativeCur, uInfo & 0x7fff, g_apszIemThreadedFunctions[RT_HIWORD(uInfo)],
9099 g_acIemThreadedFunctionUsedArgs[RT_HIWORD(uInfo)],
9100 uInfo & 0x8000 ? "recompiled" : "todo");
9101 else if ((uInfo & ~RT_BIT_32(31)) < RT_ELEMENTS(a_apszMarkers))
9102 pHlp->pfnPrintf(pHlp, " %p: nop ; marker: %s\n", pNativeCur, a_apszMarkers[uInfo & ~RT_BIT_32(31)]);
9103 else
9104 pHlp->pfnPrintf(pHlp, " %p: nop ; unknown marker: %#x (%d)\n", pNativeCur, uInfo, uInfo);
9105 }
9106 else
9107# endif
9108 {
9109 const char *pszAnnotation = NULL;
9110# ifdef RT_ARCH_AMD64
9111 DISFormatYasmEx(&Dis, szDisBuf, sizeof(szDisBuf),
9112 DIS_FMT_FLAGS_BYTES_WIDTH_MAKE(10) | DIS_FMT_FLAGS_BYTES_LEFT
9113 | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
9114 NULL /*pfnGetSymbol*/, NULL /*pvUser*/);
9115 PCDISOPPARAM pMemOp;
9116 if (DISUSE_IS_EFFECTIVE_ADDR(Dis.Param1.fUse))
9117 pMemOp = &Dis.Param1;
9118 else if (DISUSE_IS_EFFECTIVE_ADDR(Dis.Param2.fUse))
9119 pMemOp = &Dis.Param2;
9120 else if (DISUSE_IS_EFFECTIVE_ADDR(Dis.Param3.fUse))
9121 pMemOp = &Dis.Param3;
9122 else
9123 pMemOp = NULL;
9124 if ( pMemOp
9125 && pMemOp->x86.Base.idxGenReg == IEMNATIVE_REG_FIXED_PVMCPU
9126 && (pMemOp->fUse & (DISUSE_BASE | DISUSE_REG_GEN64)) == (DISUSE_BASE | DISUSE_REG_GEN64))
9127 pszAnnotation = iemNativeDbgVCpuOffsetToName(pMemOp->fUse & DISUSE_DISPLACEMENT32
9128 ? pMemOp->x86.uDisp.u32 : pMemOp->x86.uDisp.u8);
9129
9130#elif defined(RT_ARCH_ARM64)
9131 DISFormatArmV8Ex(&Dis, szDisBuf, sizeof(szDisBuf),
9132 DIS_FMT_FLAGS_BYTES_LEFT | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
9133 NULL /*pfnGetSymbol*/, NULL /*pvUser*/);
9134# else
9135# error "Port me"
9136# endif
9137 if (pszAnnotation)
9138 {
9139 static unsigned const s_offAnnotation = 55;
9140 size_t const cchAnnotation = strlen(pszAnnotation);
9141 size_t cchDis = strlen(szDisBuf);
9142 if (RT_MAX(cchDis, s_offAnnotation) + sizeof(" ; ") + cchAnnotation <= sizeof(szDisBuf))
9143 {
9144 if (cchDis < s_offAnnotation)
9145 {
9146 memset(&szDisBuf[cchDis], ' ', s_offAnnotation - cchDis);
9147 cchDis = s_offAnnotation;
9148 }
9149 szDisBuf[cchDis++] = ' ';
9150 szDisBuf[cchDis++] = ';';
9151 szDisBuf[cchDis++] = ' ';
9152 memcpy(&szDisBuf[cchDis], pszAnnotation, cchAnnotation + 1);
9153 }
9154 }
9155 pHlp->pfnPrintf(pHlp, " %p: %s\n", pNativeCur, szDisBuf);
9156 }
9157 }
9158 else
9159 {
9160# if defined(RT_ARCH_AMD64)
9161 pHlp->pfnPrintf(pHlp, " %p: %.*Rhxs - disassembly failure %Rrc\n",
9162 pNativeCur, RT_MIN(cNative - offNative, 16), pNativeCur, rc);
9163# elif defined(RT_ARCH_ARM64)
9164 pHlp->pfnPrintf(pHlp, " %p: %#010RX32 - disassembly failure %Rrc\n", pNativeCur, *pNativeCur, rc);
9165# else
9166# error "Port me"
9167# endif
9168 cbInstr = sizeof(paNative[0]);
9169 }
9170 offNative += cbInstr / sizeof(paNative[0]);
9171
9172# else /* VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER */
9173 cs_insn *pInstr;
9174 size_t cInstrs = cs_disasm(hDisasm, (const uint8_t *)pNativeCur, (cNative - offNative) * sizeof(*pNativeCur),
9175 (uintptr_t)pNativeCur, 1, &pInstr);
9176 if (cInstrs > 0)
9177 {
9178 Assert(cInstrs == 1);
9179 const char *pszAnnotation = NULL;
9180# if defined(RT_ARCH_ARM64)
9181 if ( (pInstr->id >= ARM64_INS_LD1 && pInstr->id < ARM64_INS_LSL)
9182 || (pInstr->id >= ARM64_INS_ST1 && pInstr->id < ARM64_INS_SUB))
9183 {
9184                     /* This is a bit crappy, but the disassembler provides incomplete addressing details. */
9185 AssertCompile(IEMNATIVE_REG_FIXED_PVMCPU == 28 && IEMNATIVE_REG_FIXED_PCPUMCTX == 27);
9186 char *psz = strchr(pInstr->op_str, '[');
9187 if (psz && psz[1] == 'x' && psz[2] == '2' && (psz[3] == '7' || psz[3] == '8'))
9188 {
9189 uint32_t const offVCpu = psz[3] == '8'? 0 : RT_UOFFSETOF(VMCPU, cpum.GstCtx);
9190 int32_t off = -1;
9191 psz += 4;
9192 if (*psz == ']')
9193 off = 0;
9194 else if (*psz == ',')
9195 {
9196 psz = RTStrStripL(psz + 1);
9197 if (*psz == '#')
9198 off = RTStrToInt32(&psz[1]);
9199 /** @todo deal with index registers and LSL as well... */
9200 }
9201 if (off >= 0)
9202 pszAnnotation = iemNativeDbgVCpuOffsetToName(offVCpu + (uint32_t)off);
9203 }
9204 }
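            /* Example of the parsing above (hypothetical instruction text): for an
               op_str like "w9, [x28, #0x123]" the code finds the '[', sees x28 (the
               pVCpu register), parses #0x123 and asks iemNativeDbgVCpuOffsetToName(0x123)
               for the annotation; an [x27, ...] base gets RT_UOFFSETOF(VMCPU, cpum.GstCtx)
               added first since x27 points at the guest context. */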
9205# endif
9206
9207 size_t const cchOp = strlen(pInstr->op_str);
9208# if defined(RT_ARCH_AMD64)
9209 if (pszAnnotation)
9210 pHlp->pfnPrintf(pHlp, " %p: %.*Rhxs %-7s %s%*s ; %s\n",
9211 pNativeCur, pInstr->size, pNativeCur, pInstr->mnemonic, pInstr->op_str,
9212 cchOp < 55 ? 55 - cchOp : 0, "", pszAnnotation);
9213 else
9214 pHlp->pfnPrintf(pHlp, " %p: %.*Rhxs %-7s %s\n",
9215 pNativeCur, pInstr->size, pNativeCur, pInstr->mnemonic, pInstr->op_str);
9216
9217# else
9218 if (pszAnnotation)
9219 pHlp->pfnPrintf(pHlp, " %p: %#010RX32 %-7s %s%*s ; %s\n",
9220 pNativeCur, *pNativeCur, pInstr->mnemonic, pInstr->op_str,
9221 cchOp < 55 ? 55 - cchOp : 0, "", pszAnnotation);
9222 else
9223 pHlp->pfnPrintf(pHlp, " %p: %#010RX32 %-7s %s\n",
9224 pNativeCur, *pNativeCur, pInstr->mnemonic, pInstr->op_str);
9225# endif
9226 offNative += pInstr->size / sizeof(*pNativeCur);
9227 cs_free(pInstr, cInstrs);
9228 }
9229 else
9230 {
9231# if defined(RT_ARCH_AMD64)
9232 pHlp->pfnPrintf(pHlp, " %p: %.*Rhxs - disassembly failure %d\n",
9233                                 pNativeCur, RT_MIN(cNative - offNative, 16), pNativeCur, cs_errno(hDisasm));
9234# else
9235 pHlp->pfnPrintf(pHlp, " %p: %#010RX32 - disassembly failure %d\n", pNativeCur, *pNativeCur, cs_errno(hDisasm));
9236# endif
9237 offNative++;
9238 }
9239# endif /* VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER */
9240 }
9241 }
9242 else
9243#endif /* IEMNATIVE_WITH_TB_DEBUG_INFO */
9244 {
9245 /*
9246 * No debug info, just disassemble the x86 code and then the native code.
9247 *
9248 * First the guest code:
9249 */
9250 for (unsigned i = 0; i < pTb->cRanges; i++)
9251 {
9252 RTGCPHYS GCPhysPc = pTb->aRanges[i].offPhysPage
9253 + (pTb->aRanges[i].idxPhysPage == 0
9254 ? pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK
9255 : pTb->aGCPhysPages[pTb->aRanges[i].idxPhysPage - 1]);
9256 pHlp->pfnPrintf(pHlp, " Range #%u: GCPhysPc=%RGp LB %#x [idxPg=%d]\n",
9257 i, GCPhysPc, pTb->aRanges[i].cbOpcodes, pTb->aRanges[i].idxPhysPage);
9258 unsigned off = pTb->aRanges[i].offOpcodes;
9259 /** @todo this ain't working when crossing pages! */
9260 unsigned const cbOpcodes = pTb->aRanges[i].cbOpcodes + off;
9261 while (off < cbOpcodes)
9262 {
9263 uint32_t cbInstr = 1;
9264 int rc = DISInstrWithPrefetchedBytes(GCPhysPc, enmGstCpuMode, DISOPTYPE_ALL,
9265 &pTb->pabOpcodes[off], cbOpcodes - off,
9266 iemNativeDisasReadBytesDummy, NULL, &Dis, &cbInstr);
9267 if (RT_SUCCESS(rc))
9268 {
9269 DISFormatYasmEx(&Dis, szDisBuf, sizeof(szDisBuf),
9270 DIS_FMT_FLAGS_BYTES_WIDTH_MAKE(10) | DIS_FMT_FLAGS_BYTES_LEFT
9271 | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
9272 NULL /*pfnGetSymbol*/, NULL /*pvUser*/);
9273 pHlp->pfnPrintf(pHlp, " %RGp: %s\n", GCPhysPc, szDisBuf);
9274 GCPhysPc += cbInstr;
9275 off += cbInstr;
9276 }
9277 else
9278 {
9279 pHlp->pfnPrintf(pHlp, " %RGp: %.*Rhxs - disassembly failure %Rrc\n",
9280 GCPhysPc, cbOpcodes - off, &pTb->pabOpcodes[off], rc);
9281 break;
9282 }
9283 }
9284 }
9285
9286 /*
9287 * Then the native code:
9288 */
9289 pHlp->pfnPrintf(pHlp, " Native code %p L %#x\n", paNative, cNative);
9290 while (offNative < cNative)
9291 {
9292 PCIEMNATIVEINSTR const pNativeCur = &paNative[offNative];
9293# ifndef VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER
9294 uint32_t cbInstr = sizeof(paNative[0]);
9295 int const rc = DISInstr(pNativeCur, enmHstCpuMode, &Dis, &cbInstr);
9296 if (RT_SUCCESS(rc))
9297 {
9298# if defined(RT_ARCH_AMD64)
9299 if (Dis.pCurInstr->uOpcode == OP_NOP && cbInstr == 7) /* iemNativeEmitMarker */
9300 {
9301 uint32_t const uInfo = *(uint32_t const *)&Dis.Instr.ab[3];
9302 if (RT_HIWORD(uInfo) < kIemThreadedFunc_End)
9303 pHlp->pfnPrintf(pHlp, "\n %p: nop ; marker: call #%u to %s (%u args) - %s\n",
9304 pNativeCur, uInfo & 0x7fff, g_apszIemThreadedFunctions[RT_HIWORD(uInfo)],
9305 g_acIemThreadedFunctionUsedArgs[RT_HIWORD(uInfo)],
9306 uInfo & 0x8000 ? "recompiled" : "todo");
9307 else if ((uInfo & ~RT_BIT_32(31)) < RT_ELEMENTS(a_apszMarkers))
9308 pHlp->pfnPrintf(pHlp, " %p: nop ; marker: %s\n", pNativeCur, a_apszMarkers[uInfo & ~RT_BIT_32(31)]);
9309 else
9310 pHlp->pfnPrintf(pHlp, " %p: nop ; unknown marker: %#x (%d)\n", pNativeCur, uInfo, uInfo);
9311 }
9312 else
9313# endif
9314 {
9315# ifdef RT_ARCH_AMD64
9316 DISFormatYasmEx(&Dis, szDisBuf, sizeof(szDisBuf),
9317 DIS_FMT_FLAGS_BYTES_WIDTH_MAKE(10) | DIS_FMT_FLAGS_BYTES_LEFT
9318 | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
9319 NULL /*pfnGetSymbol*/, NULL /*pvUser*/);
9320# elif defined(RT_ARCH_ARM64)
9321 DISFormatArmV8Ex(&Dis, szDisBuf, sizeof(szDisBuf),
9322 DIS_FMT_FLAGS_BYTES_LEFT | DIS_FMT_FLAGS_RELATIVE_BRANCH | DIS_FMT_FLAGS_C_HEX,
9323 NULL /*pfnGetSymbol*/, NULL /*pvUser*/);
9324# else
9325# error "Port me"
9326# endif
9327 pHlp->pfnPrintf(pHlp, " %p: %s\n", pNativeCur, szDisBuf);
9328 }
9329 }
9330 else
9331 {
9332# if defined(RT_ARCH_AMD64)
9333 pHlp->pfnPrintf(pHlp, " %p: %.*Rhxs - disassembly failure %Rrc\n",
9334 pNativeCur, RT_MIN(cNative - offNative, 16), pNativeCur, rc);
9335# else
9336 pHlp->pfnPrintf(pHlp, " %p: %#010RX32 - disassembly failure %Rrc\n", pNativeCur, *pNativeCur, rc);
9337# endif
9338 cbInstr = sizeof(paNative[0]);
9339 }
9340 offNative += cbInstr / sizeof(paNative[0]);
9341
9342# else /* VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER */
9343 cs_insn *pInstr;
9344 size_t cInstrs = cs_disasm(hDisasm, (const uint8_t *)pNativeCur, (cNative - offNative) * sizeof(*pNativeCur),
9345 (uintptr_t)pNativeCur, 1, &pInstr);
9346 if (cInstrs > 0)
9347 {
9348 Assert(cInstrs == 1);
9349# if defined(RT_ARCH_AMD64)
9350 pHlp->pfnPrintf(pHlp, " %p: %.*Rhxs %-7s %s\n",
9351 pNativeCur, pInstr->size, pNativeCur, pInstr->mnemonic, pInstr->op_str);
9352# else
9353 pHlp->pfnPrintf(pHlp, " %p: %#010RX32 %-7s %s\n",
9354 pNativeCur, *pNativeCur, pInstr->mnemonic, pInstr->op_str);
9355# endif
9356 offNative += pInstr->size / sizeof(*pNativeCur);
9357 cs_free(pInstr, cInstrs);
9358 }
9359 else
9360 {
9361# if defined(RT_ARCH_AMD64)
9362 pHlp->pfnPrintf(pHlp, " %p: %.*Rhxs - disassembly failure %d\n",
9363                                 pNativeCur, RT_MIN(cNative - offNative, 16), pNativeCur, cs_errno(hDisasm));
9364# else
9365 pHlp->pfnPrintf(pHlp, " %p: %#010RX32 - disassembly failure %d\n", pNativeCur, *pNativeCur, cs_errno(hDisasm));
9366# endif
9367 offNative++;
9368 }
9369# endif /* VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER */
9370 }
9371 }
9372
9373#ifdef VBOX_WITH_IEM_USING_CAPSTONE_DISASSEMBLER
9374 /* Cleanup. */
9375 cs_close(&hDisasm);
9376#endif
9377}
9378
9379
9380/**
9381 * Recompiles the given threaded TB into a native one.
9382 *
9383 * In case of failure the translation block will be returned as-is.
9384 *
9385 * @returns pTb.
9386 * @param pVCpu The cross context virtual CPU structure of the calling
9387 * thread.
9388  * @param   pTb     The threaded translation block to recompile to native.
9389 */
9390DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT
9391{
9392 STAM_REL_PROFILE_START(&pVCpu->iem.s.StatNativeRecompilation, a);
9393
9394 /*
9395 * The first time thru, we allocate the recompiler state, the other times
9396 * we just need to reset it before using it again.
9397 */
9398 PIEMRECOMPILERSTATE pReNative = pVCpu->iem.s.pNativeRecompilerStateR3;
9399 if (RT_LIKELY(pReNative))
9400 iemNativeReInit(pReNative, pTb);
9401 else
9402 {
9403 pReNative = iemNativeInit(pVCpu, pTb);
9404 AssertReturn(pReNative, pTb);
9405 }
9406
9407#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
9408 /*
9409 * First do liveness analysis. This is done backwards.
9410 */
9411 {
9412 uint32_t idxCall = pTb->Thrd.cCalls;
9413 if (idxCall <= pReNative->cLivenessEntriesAlloc)
9414 { /* likely */ }
9415 else
9416 {
9417 uint32_t cAlloc = RT_MAX(pReNative->cLivenessEntriesAlloc, _4K);
9418 while (idxCall > cAlloc)
9419 cAlloc *= 2;
9420 void *pvNew = RTMemRealloc(pReNative->paLivenessEntries, sizeof(pReNative->paLivenessEntries[0]) * cAlloc);
9421 AssertReturn(pvNew, pTb);
9422 pReNative->paLivenessEntries = (PIEMLIVENESSENTRY)pvNew;
9423 pReNative->cLivenessEntriesAlloc = cAlloc;
9424 }
9425 AssertReturn(idxCall > 0, pTb);
9426 PIEMLIVENESSENTRY const paLivenessEntries = pReNative->paLivenessEntries;
9427
9428 /* The initial (final) entry. */
9429 idxCall--;
9430 IEM_LIVENESS_RAW_INIT_AS_UNUSED(&paLivenessEntries[idxCall]);
9431
9432 /* Loop backwards thru the calls and fill in the other entries. */
9433 PCIEMTHRDEDCALLENTRY pCallEntry = &pTb->Thrd.paCalls[idxCall];
9434 while (idxCall > 0)
9435 {
9436 PFNIEMNATIVELIVENESSFUNC const pfnLiveness = g_apfnIemNativeLivenessFunctions[pCallEntry->enmFunction];
9437 if (pfnLiveness)
9438 pfnLiveness(pCallEntry, &paLivenessEntries[idxCall], &paLivenessEntries[idxCall - 1]);
9439 else
9440 IEM_LIVENESS_RAW_INIT_WITH_XCPT_OR_CALL(&paLivenessEntries[idxCall - 1], &paLivenessEntries[idxCall]);
9441 pCallEntry--;
9442 idxCall--;
9443 }
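        /* Each pfnLiveness step consumes the state at [idxCall] and produces the one
           at [idxCall - 1]; the information thus flows from the last call back to the
           first, which is why the initial entry filled in above is the final one. */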
9444
9445# ifdef VBOX_WITH_STATISTICS
9446         /* Check if there are any EFLAGS optimizations to be had here.  This requires someone setting them
9447            to 'clobbered' rather than 'input'. */
9448 /** @todo */
9449# endif
9450 }
9451#endif
9452
9453 /*
9454 * Recompiling and emitting code is done using try/throw/catch or setjmp/longjmp
9455 * for aborting if an error happens.
9456 */
9457 uint32_t cCallsLeft = pTb->Thrd.cCalls;
9458#ifdef LOG_ENABLED
9459 uint32_t const cCallsOrg = cCallsLeft;
9460#endif
9461 uint32_t off = 0;
9462 int rc = VINF_SUCCESS;
9463 IEMNATIVE_TRY_SETJMP(pReNative, rc)
9464 {
9465 /*
9466 * Emit prolog code (fixed).
9467 */
9468 off = iemNativeEmitProlog(pReNative, off);
9469
9470 /*
9471 * Convert the calls to native code.
9472 */
9473#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
9474 int32_t iGstInstr = -1;
9475#endif
9476#ifndef VBOX_WITHOUT_RELEASE_STATISTICS
9477 uint32_t cThreadedCalls = 0;
9478 uint32_t cRecompiledCalls = 0;
9479#endif
9480#if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) || defined(VBOX_STRICT) || defined(LOG_ENABLED)
9481 uint32_t idxCurCall = 0;
9482#endif
9483 PCIEMTHRDEDCALLENTRY pCallEntry = pTb->Thrd.paCalls;
9484 pReNative->fExec = pTb->fFlags & IEMTB_F_IEM_F_MASK;
9485 while (cCallsLeft-- > 0)
9486 {
9487 PFNIEMNATIVERECOMPFUNC const pfnRecom = g_apfnIemNativeRecompileFunctions[pCallEntry->enmFunction];
9488#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
9489 pReNative->idxCurCall = idxCurCall;
9490#endif
9491
9492 /*
9493 * Debug info, assembly markup and statistics.
9494 */
9495#if defined(IEMNATIVE_WITH_TB_DEBUG_INFO) || !defined(IEMNATIVE_WITH_BLTIN_CHECKMODE)
9496 if (pCallEntry->enmFunction == kIemThreadedFunc_BltIn_CheckMode)
9497 pReNative->fExec = pCallEntry->auParams[0] & IEMTB_F_IEM_F_MASK;
9498#endif
9499#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
9500 iemNativeDbgInfoAddNativeOffset(pReNative, off);
9501 if (iGstInstr < (int32_t)pCallEntry->idxInstr)
9502 {
9503 if (iGstInstr < (int32_t)pTb->cInstructions)
9504 iemNativeDbgInfoAddGuestInstruction(pReNative, pReNative->fExec);
9505 else
9506 Assert(iGstInstr == pTb->cInstructions);
9507 iGstInstr = pCallEntry->idxInstr;
9508 }
9509 iemNativeDbgInfoAddThreadedCall(pReNative, (IEMTHREADEDFUNCS)pCallEntry->enmFunction, pfnRecom != NULL);
9510#endif
9511#if defined(VBOX_STRICT)
9512 off = iemNativeEmitMarker(pReNative, off,
9513 RT_MAKE_U32(idxCurCall | (pfnRecom ? 0x8000 : 0), pCallEntry->enmFunction));
9514#endif
9515#if defined(VBOX_STRICT)
9516 iemNativeRegAssertSanity(pReNative);
9517#endif
9518#ifdef VBOX_WITH_STATISTICS
9519 off = iemNativeEmitThreadCallStats(pReNative, off, pCallEntry);
9520#endif
9521
9522 /*
9523 * Actual work.
9524 */
9525 Log2(("%u[%u]: %s%s\n", idxCurCall, pCallEntry->idxInstr, g_apszIemThreadedFunctions[pCallEntry->enmFunction],
9526 pfnRecom ? "(recompiled)" : "(todo)"));
9527 if (pfnRecom) /** @todo stats on this. */
9528 {
9529 off = pfnRecom(pReNative, off, pCallEntry);
9530 STAM_REL_STATS({cRecompiledCalls++;});
9531 }
9532 else
9533 {
9534 off = iemNativeEmitThreadedCall(pReNative, off, pCallEntry);
9535 STAM_REL_STATS({cThreadedCalls++;});
9536 }
9537 Assert(off <= pReNative->cInstrBufAlloc);
9538 Assert(pReNative->cCondDepth == 0);
9539
9540#if defined(LOG_ENABLED) && defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS)
9541 if (LogIs2Enabled())
9542 {
9543 PCIEMLIVENESSENTRY pLivenessEntry = &pReNative->paLivenessEntries[idxCurCall];
9544# ifndef IEMLIVENESS_EXTENDED_LAYOUT
9545 static const char s_achState[] = "CUXI";
9546# else
9547 static const char s_achState[] = "UxRrWwMmCcQqKkNn";
9548# endif
9549
9550 char szGpr[17];
9551 for (unsigned i = 0; i < 16; i++)
9552 szGpr[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_GprFirst)];
9553 szGpr[16] = '\0';
9554
9555 char szSegBase[X86_SREG_COUNT + 1];
9556 char szSegLimit[X86_SREG_COUNT + 1];
9557 char szSegAttrib[X86_SREG_COUNT + 1];
9558 char szSegSel[X86_SREG_COUNT + 1];
9559 for (unsigned i = 0; i < X86_SREG_COUNT; i++)
9560 {
9561 szSegBase[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegBaseFirst)];
9562 szSegAttrib[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegAttribFirst)];
9563 szSegLimit[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegLimitFirst)];
9564 szSegSel[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_SegSelFirst)];
9565 }
9566 szSegBase[X86_SREG_COUNT] = szSegAttrib[X86_SREG_COUNT] = szSegLimit[X86_SREG_COUNT]
9567 = szSegSel[X86_SREG_COUNT] = '\0';
9568
9569 char szEFlags[8];
9570 for (unsigned i = 0; i < 7; i++)
9571 szEFlags[i] = s_achState[iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, i + kIemNativeGstReg_EFlags)];
9572 szEFlags[7] = '\0';
9573
9574 Log2(("liveness: grp=%s segbase=%s segattr=%s seglim=%s segsel=%s efl=%s\n",
9575 szGpr, szSegBase, szSegAttrib, szSegLimit, szSegSel, szEFlags));
9576 }
9577#endif
9578
9579 /*
9580 * Advance.
9581 */
9582 pCallEntry++;
9583#if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) || defined(VBOX_STRICT) || defined(LOG_ENABLED)
9584 idxCurCall++;
9585#endif
9586 }
9587
9588 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatNativeCallsRecompiled, cRecompiledCalls);
9589 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatNativeCallsThreaded, cThreadedCalls);
9590 if (!cThreadedCalls)
9591 STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatNativeFullyRecompiledTbs);
9592
9593 /*
9594 * Emit the epilog code.
9595 */
9596 uint32_t idxReturnLabel;
9597 off = iemNativeEmitEpilog(pReNative, off, &idxReturnLabel);
9598
9599 /*
9600 * Generate special jump labels.
9601 */
9602 if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_ReturnBreak))
9603 off = iemNativeEmitReturnBreak(pReNative, off, idxReturnLabel);
9604 if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_ReturnWithFlags))
9605 off = iemNativeEmitReturnWithFlags(pReNative, off, idxReturnLabel);
9606
9607 /*
9608          * Generate simple TB tail labels that just call a helper with a pVCpu
9609          * arg and either return or longjmp/throw a non-zero status.
9610 *
9611 * The array entries must be ordered by enmLabel value so we can index
9612 * using fTailLabels bit numbers.
9613 */
9614 typedef IEM_DECL_NATIVE_HLP_PTR(int, PFNIEMNATIVESIMPLETAILLABELCALL,(PVMCPUCC pVCpu));
9615 static struct
9616 {
9617 IEMNATIVELABELTYPE enmLabel;
9618 PFNIEMNATIVESIMPLETAILLABELCALL pfnCallback;
9619 } const g_aSimpleTailLabels[] =
9620 {
9621 { kIemNativeLabelType_Invalid, NULL },
9622 { kIemNativeLabelType_RaiseDe, iemNativeHlpExecRaiseDe },
9623 { kIemNativeLabelType_RaiseUd, iemNativeHlpExecRaiseUd },
9624 { kIemNativeLabelType_RaiseSseRelated, iemNativeHlpExecRaiseSseRelated },
9625 { kIemNativeLabelType_RaiseAvxRelated, iemNativeHlpExecRaiseAvxRelated },
9626 { kIemNativeLabelType_RaiseNm, iemNativeHlpExecRaiseNm },
9627 { kIemNativeLabelType_RaiseGp0, iemNativeHlpExecRaiseGp0 },
9628 { kIemNativeLabelType_RaiseMf, iemNativeHlpExecRaiseMf },
9629 { kIemNativeLabelType_RaiseXf, iemNativeHlpExecRaiseXf },
9630 { kIemNativeLabelType_ObsoleteTb, iemNativeHlpObsoleteTb },
9631 { kIemNativeLabelType_NeedCsLimChecking, iemNativeHlpNeedCsLimChecking },
9632 { kIemNativeLabelType_CheckBranchMiss, iemNativeHlpCheckBranchMiss },
9633 };
9634 AssertCompile(RT_ELEMENTS(g_aSimpleTailLabels) == (unsigned)kIemNativeLabelType_LastSimple + 1U);
9635 AssertCompile(kIemNativeLabelType_Invalid == 0);
9636 uint64_t fTailLabels = pReNative->bmLabelTypes & (RT_BIT_64(kIemNativeLabelType_LastSimple + 1U) - 2U);
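        /* The '- 2U' keeps bits 1..LastSimple and drops bit 0 (kIemNativeLabelType_Invalid,
           which has no callback); with the 12 table entries above that works out to a
           mask of 0xffe. */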
9637 if (fTailLabels)
9638 {
9639 do
9640 {
9641 IEMNATIVELABELTYPE const enmLabel = (IEMNATIVELABELTYPE)(ASMBitFirstSetU64(fTailLabels) - 1U);
9642 fTailLabels &= ~RT_BIT_64(enmLabel);
9643 Assert(g_aSimpleTailLabels[enmLabel].enmLabel == enmLabel);
9644
9645 uint32_t const idxLabel = iemNativeLabelFind(pReNative, enmLabel);
9646 Assert(idxLabel != UINT32_MAX);
9647 if (idxLabel != UINT32_MAX)
9648 {
9649 iemNativeLabelDefine(pReNative, idxLabel, off);
9650
9651 /* int pfnCallback(PVMCPUCC pVCpu) */
9652 off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
9653 off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)g_aSimpleTailLabels[enmLabel].pfnCallback);
9654
9655 /* jump back to the return sequence. */
9656 off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
9657 }
9658
9659 } while (fTailLabels);
9660 }
9661 }
9662 IEMNATIVE_CATCH_LONGJMP_BEGIN(pReNative, rc);
9663 {
9664 Log(("iemNativeRecompile: Caught %Rrc while recompiling!\n", rc));
9665 return pTb;
9666 }
9667 IEMNATIVE_CATCH_LONGJMP_END(pReNative);
9668 Assert(off <= pReNative->cInstrBufAlloc);
9669
9670 /*
9671      * Make sure all labels have been defined.
9672 */
9673 PIEMNATIVELABEL const paLabels = pReNative->paLabels;
9674#ifdef VBOX_STRICT
9675 uint32_t const cLabels = pReNative->cLabels;
9676 for (uint32_t i = 0; i < cLabels; i++)
9677 AssertMsgReturn(paLabels[i].off < off, ("i=%d enmType=%d\n", i, paLabels[i].enmType), pTb);
9678#endif
9679
9680 /*
9681 * Allocate executable memory, copy over the code we've generated.
9682 */
9683 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
9684 if (pTbAllocator->pDelayedFreeHead)
9685 iemTbAllocatorProcessDelayedFrees(pVCpu, pVCpu->iem.s.pTbAllocatorR3);
9686
9687 PIEMNATIVEINSTR const paFinalInstrBuf = (PIEMNATIVEINSTR)iemExecMemAllocatorAlloc(pVCpu, off * sizeof(IEMNATIVEINSTR));
9688 AssertReturn(paFinalInstrBuf, pTb);
9689 memcpy(paFinalInstrBuf, pReNative->pInstrBuf, off * sizeof(paFinalInstrBuf[0]));
9690
9691 /*
9692 * Apply fixups.
9693 */
9694 PIEMNATIVEFIXUP const paFixups = pReNative->paFixups;
9695 uint32_t const cFixups = pReNative->cFixups;
9696 for (uint32_t i = 0; i < cFixups; i++)
9697 {
9698 Assert(paFixups[i].off < off);
9699 Assert(paFixups[i].idxLabel < cLabels);
9700 AssertMsg(paLabels[paFixups[i].idxLabel].off < off,
9701 ("idxLabel=%d enmType=%d off=%#x (max %#x)\n", paFixups[i].idxLabel,
9702 paLabels[paFixups[i].idxLabel].enmType, paLabels[paFixups[i].idxLabel].off, off));
9703 RTPTRUNION const Ptr = { &paFinalInstrBuf[paFixups[i].off] };
9704 switch (paFixups[i].enmType)
9705 {
9706#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
9707 case kIemNativeFixupType_Rel32:
9708 Assert(paFixups[i].off + 4 <= off);
9709 *Ptr.pi32 = paLabels[paFixups[i].idxLabel].off - paFixups[i].off + paFixups[i].offAddend;
9710 continue;
9711
9712#elif defined(RT_ARCH_ARM64)
9713 case kIemNativeFixupType_RelImm26At0:
9714 {
9715 Assert(paFixups[i].off < off);
9716 int32_t const offDisp = paLabels[paFixups[i].idxLabel].off - paFixups[i].off + paFixups[i].offAddend;
9717 Assert(offDisp >= -262144 && offDisp < 262144);
9718 *Ptr.pu32 = (*Ptr.pu32 & UINT32_C(0xfc000000)) | ((uint32_t)offDisp & UINT32_C(0x03ffffff));
9719 continue;
9720 }
9721
9722 case kIemNativeFixupType_RelImm19At5:
9723 {
9724 Assert(paFixups[i].off < off);
9725 int32_t const offDisp = paLabels[paFixups[i].idxLabel].off - paFixups[i].off + paFixups[i].offAddend;
9726 Assert(offDisp >= -262144 && offDisp < 262144);
9727 *Ptr.pu32 = (*Ptr.pu32 & UINT32_C(0xff00001f)) | (((uint32_t)offDisp & UINT32_C(0x0007ffff)) << 5);
9728 continue;
9729 }
9730
9731 case kIemNativeFixupType_RelImm14At5:
9732 {
9733 Assert(paFixups[i].off < off);
9734 int32_t const offDisp = paLabels[paFixups[i].idxLabel].off - paFixups[i].off + paFixups[i].offAddend;
9735 Assert(offDisp >= -8192 && offDisp < 8192);
9736 *Ptr.pu32 = (*Ptr.pu32 & UINT32_C(0xfff8001f)) | (((uint32_t)offDisp & UINT32_C(0x00003fff)) << 5);
9737 continue;
9738 }
9739
9740#endif
9741 case kIemNativeFixupType_Invalid:
9742 case kIemNativeFixupType_End:
9743 break;
9744 }
9745 AssertFailed();
9746 }
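    /* Sketch of what a fixup amounts to (hypothetical numbers): an AMD64 Rel32 fixup
       at offset 0x100 with offAddend -4 against a label defined at offset 0x180
       patches the 32-bit field to 0x180 - 0x100 - 4 = 0x7c, i.e. the usual rel32
       displacement measured from the end of the instruction. */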
9747
9748 iemExecMemAllocatorReadyForUse(pVCpu, paFinalInstrBuf, off * sizeof(IEMNATIVEINSTR));
9749 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatTbNativeCode, off * sizeof(IEMNATIVEINSTR));
9750
9751 /*
9752 * Convert the translation block.
9753 */
9754 RTMemFree(pTb->Thrd.paCalls);
9755 pTb->Native.paInstructions = paFinalInstrBuf;
9756 pTb->Native.cInstructions = off;
9757 pTb->fFlags = (pTb->fFlags & ~IEMTB_F_TYPE_MASK) | IEMTB_F_TYPE_NATIVE;
9758#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
9759     pTb->pDbgInfo = (PIEMTBDBG)RTMemDup(pReNative->pDbgInfo, /* non-fatal, so no return check. */
9760 RT_UOFFSETOF_DYN(IEMTBDBG, aEntries[pReNative->pDbgInfo->cEntries]));
9761#endif
9762
9763 Assert(pTbAllocator->cThreadedTbs > 0);
9764 pTbAllocator->cThreadedTbs -= 1;
9765 pTbAllocator->cNativeTbs += 1;
9766 Assert(pTbAllocator->cNativeTbs <= pTbAllocator->cTotalTbs);
9767
9768#ifdef LOG_ENABLED
9769 /*
9770 * Disassemble to the log if enabled.
9771 */
9772 if (LogIs3Enabled())
9773 {
9774 Log3(("----------------------------------------- %d calls ---------------------------------------\n", cCallsOrg));
9775 iemNativeDisassembleTb(pTb, DBGFR3InfoLogHlp());
9776# if defined(DEBUG_bird) || defined(DEBUG_aeichner)
9777 RTLogFlush(NULL);
9778# endif
9779 }
9780#endif
9781 /*iemNativeDisassembleTb(pTb, DBGFR3InfoLogRelHlp());*/
9782
9783 STAM_REL_PROFILE_STOP(&pVCpu->iem.s.StatNativeRecompilation, a);
9784 return pTb;
9785}
9786