VirtualBox

Changeset 101203 in vbox


Ignore:
Timestamp:
Sep 20, 2023 2:41:16 PM (18 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
159176
Message:

VMM/IEM: Reworked the native recompiler state passing and moved most of the emitter code (+ structures) into a separate header file both to shut up clang unused inlined function warnings and in preparation of emitting code from IEM_MC_BEGIN/END blocks. bugref:10370

Location:
trunk/src/VBox/VMM
Files:
1 added
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r101193 r101203  
    7878#include "IEMInline.h"
    7979#include "IEMThreadedFunctions.h"
     80#include "IEMN8veRecompiler.h"
    8081
    8182
     
    156157
    157158
    158 
    159159/*********************************************************************************************************************************
    160160*   Executable Memory Allocator                                                                                                  *
     
    192192    void                   *pvChunk;
    193193#ifdef IN_RING3
    194 # ifdef RT_OS_WINDOWS
    195     /** Pointer to the unwind information.  This is allocated from hHeap on
    196      *  windows because (at least for AMD64) the UNWIND_INFO structure address
    197      *  in the RUNTIME_FUNCTION entry is an RVA and the chunk is the "image".  */
     194    /**
     195     * Pointer to the unwind information.
     196     *
     197     * This is used during C++ throw and longjmp (windows and probably most other
     199     * platforms).  Some debuggers (windbg) make use of it as well.
     199     *
     200     * Windows: This is allocated from hHeap on windows because (at least for
     201     *          AMD64) the UNWIND_INFO structure address in the
     202     *          RUNTIME_FUNCTION entry is an RVA and the chunk is the "image".
     203     *
     204     * Others:  Allocated from the regular heap to avoid unnecessary executable data
     205     *          structures.  This points to an IEMEXECMEMCHUNKEHFRAME structure. */
    198206    void                   *pvUnwindInfo;
    199 # else
    200     /** Exception handling frame information for proper unwinding during C++
    201      *  throws and (possibly) longjmp(). */
    202     PIEMEXECMEMCHUNKEHFRAME pEhFrame;
    203 # endif
    204207#elif defined(IN_RING0)
    205208    /** Allocation handle. */
     
    267270 * Initializes the unwind info structures for windows hosts.
    268271 */
    269 static void *iemExecMemAllocatorInitAndRegisterUnwindInfoForChunk(PIEMEXECMEMALLOCATOR pExecMemAllocator,
    270                                                                   RTHEAPSIMPLE hHeap, void *pvChunk)
     272static void *
     273iemExecMemAllocatorInitAndRegisterUnwindInfoForChunk(PIEMEXECMEMALLOCATOR pExecMemAllocator, RTHEAPSIMPLE hHeap, void *pvChunk)
    271274{
    272275    /*
     
    423426
    424427
     428#  if 0 /* unused */
    425429/**
    426430 * Emits a register (@a uReg) save location, using signed offset:
     
    434438    return Ptr;
    435439}
     440#  endif
    436441
    437442
     
    439444 * Initializes the unwind info section for non-windows hosts.
    440445 */
    441 static void iemExecMemAllocatorInitEhFrameForChunk(PIEMEXECMEMALLOCATOR pExecMemAllocator,
    442                                                    PIEMEXECMEMCHUNKEHFRAME pEhFrame, void *pvChunk)
    443 {
    444     RTPTRUNION Ptr = { pEhFrame };
     446static PIEMEXECMEMCHUNKEHFRAME
     447iemExecMemAllocatorInitAndRegisterUnwindInfoForChunk(PIEMEXECMEMALLOCATOR pExecMemAllocator, void *pvChunk)
     448{
     449    /*
     450     * Allocate the structure for the eh_frame data and associate registration stuff.
     451     */
     452    PIEMEXECMEMCHUNKEHFRAME pEhFrame = (PIEMEXECMEMCHUNKEHFRAME)RTMemAllocZ(sizeof(IEMEXECMEMCHUNKEHFRAME));
     453    AssertReturn(pEhFrame, NULL);
     454
     455    RTPTRUNION Ptr = { pEhFrame->abEhFrame };
    445456
    446457    /*
    447458     * Generate the CIE first.
    448459     */
    449 #ifdef IEMNATIVE_USE_LIBUNWIND /* libunwind (llvm, darwin) only supports v1 and v3. */
     460#  ifdef IEMNATIVE_USE_LIBUNWIND /* libunwind (llvm, darwin) only supports v1 and v3. */
    450461    uint8_t const iDwarfVer = 3;
    451 #else
     462#  else
    452463    uint8_t const iDwarfVer = 4;
    453 #endif
     464#  endif
    454465    RTPTRUNION const PtrCie = Ptr;
    455466    *Ptr.pu32++ = 123;                                      /* The CIE length will be determined later. */
     
    482493     * Generate an FDE for the whole chunk area.
    483494     */
    484 #ifdef IEMNATIVE_USE_LIBUNWIND
     495#  ifdef IEMNATIVE_USE_LIBUNWIND
    485496    pEhFrame->offFda = Ptr.u - (uintptr_t)&pEhFrame->abEhFrame[0];
    486 #endif
     497#  endif
    487498    RTPTRUNION const PtrFde = Ptr;
    488499    *Ptr.pu32++ = 123;                                      /* The CIE length will be determined later. */
     
    501512    *Ptr.pu32++ = 0;            /* just to be sure... */
    502513    Assert(Ptr.u - (uintptr_t)&pEhFrame->abEhFrame[0] <= sizeof(pEhFrame->abEhFrame));
     514
     515    /*
     516     * Register it.
     517     */
     518#  ifdef IEMNATIVE_USE_LIBUNWIND
     519    __register_frame(&pEhFrame->abEhFrame[pEhFrame->offFda]);
     520#  else
     521    memset(pEhFrame->abObject, 0xf6, sizeof(pEhFrame->abObject)); /* color the memory to better spot usage */
     522    __register_frame_info(pEhFrame->abEhFrame, pEhFrame->abObject);
     523#  endif
     524    return pEhFrame;
    503525}
    504526
     
    577599#ifdef IN_RING3
    578600# ifdef RT_OS_WINDOWS
    579             /*
    580              * The unwind information need to reside inside the chunk (at least
    581              * the UNWIND_INFO structures does), as the UnwindInfoAddress member
    582              * of RUNTIME_FUNCTION (AMD64) is relative to the "image base".
    583              *
    584              * We need unwind info because even longjmp() does a C++ stack unwind.
    585              */
    586601            void *pvUnwindInfo = iemExecMemAllocatorInitAndRegisterUnwindInfoForChunk(pExecMemAllocator, hHeap, pvChunk);
    587602            AssertStmt(pvUnwindInfo, rc = VERR_INTERNAL_ERROR_3);
    588603# else
    589             /*
    590              * Generate an .eh_frame section for the chunk and register it so
    591              * the unwinding code works (required for C++ exceptions and
    592              * probably also for longjmp()).
    593              */
    594             PIEMEXECMEMCHUNKEHFRAME pEhFrame = (PIEMEXECMEMCHUNKEHFRAME)RTMemAllocZ(sizeof(IEMEXECMEMCHUNKEHFRAME));
    595             if (pEhFrame)
    596             {
    597                 iemExecMemAllocatorInitEhFrameForChunk(pExecMemAllocator, pEhFrame, pvChunk);
    598 #  ifdef IEMNATIVE_USE_LIBUNWIND
    599                 __register_frame(&pEhFrame->abEhFrame[pEhFrame->offFda]);
    600 #  else
    601                 memset(pEhFrame->abObject, 0xf6, sizeof(pEhFrame->abObject)); /* color the memory to better spot usage */
    602                 __register_frame_info(pEhFrame->abEhFrame, pEhFrame->abObject);
    603 #  endif
    604             }
    605             else
    606                 rc = VERR_NO_MEMORY;
     604            void *pvUnwindInfo = iemExecMemAllocatorInitAndRegisterUnwindInfoForChunk(pExecMemAllocator, pvChunk);
     605            AssertStmt(pvUnwindInfo, rc = VERR_NO_MEMORY);
    607606# endif
    608607            if (RT_SUCCESS(rc))
     
    615614                pExecMemAllocator->aChunks[idxChunk].hHeap        = hHeap;
    616615#ifdef IN_RING3
    617 # ifdef RT_OS_WINDOWS
    618616                pExecMemAllocator->aChunks[idxChunk].pvUnwindInfo = pvUnwindInfo;
    619 # else
    620                 pExecMemAllocator->aChunks[idxChunk].pEhFrame     = pEhFrame;
    621 # endif
    622617#endif
    623618
     
    708703    for (uint32_t i = 0; i < cMaxChunks; i++)
    709704    {
    710         pExecMemAllocator->aChunks[i].hHeap    = NIL_RTHEAPSIMPLE;
    711         pExecMemAllocator->aChunks[i].pvChunk  = NULL;
     705        pExecMemAllocator->aChunks[i].hHeap        = NIL_RTHEAPSIMPLE;
     706        pExecMemAllocator->aChunks[i].pvChunk      = NULL;
    712707#ifdef IN_RING0
    713         pExecMemAllocator->aChunks[i].hMemObj  = NIL_RTR0MEMOBJ;
    714 #elif !defined(RT_OS_WINDOWS)
    715         pExecMemAllocator->aChunks[i].pEhFrame = NULL;
     708        pExecMemAllocator->aChunks[i].hMemObj      = NIL_RTR0MEMOBJ;
     709#else
     710        pExecMemAllocator->aChunks[i].pvUnwindInfo = NULL;
    716711#endif
    717712    }
     
    858853*********************************************************************************************************************************/
    859854
    860 /** Native code generator label types. */
    861 typedef enum
    862 {
    863     kIemNativeLabelType_Invalid = 0,
    864     kIemNativeLabelType_Return,
    865     kIemNativeLabelType_NonZeroRetOrPassUp,
    866     kIemNativeLabelType_End
    867 } IEMNATIVELABELTYPE;
    868 
    869 /** Native code generator label definition. */
    870 typedef struct IEMNATIVELABEL
    871 {
    872     /** Code offset if defined, UINT32_MAX if it needs to be generated after/in
    873      * the epilog. */
    874     uint32_t    off;
    875     /** The type of label (IEMNATIVELABELTYPE). */
    876     uint16_t    enmType;
    877     /** Additional label data, type specific. */
    878     uint16_t    uData;
    879 } IEMNATIVELABEL;
    880 /** Pointer to a label. */
    881 typedef IEMNATIVELABEL *PIEMNATIVELABEL;
    882 
    883 
    884 /** Native code generator fixup types.  */
    885 typedef enum
    886 {
    887     kIemNativeFixupType_Invalid = 0,
    888 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    889     /** AMD64 fixup: PC relative 32-bit with addend in bData. */
    890     kIemNativeFixupType_Rel32,
    891 #elif defined(RT_ARCH_ARM64)
    892 #endif
    893     kIemNativeFixupType_End
    894 } IEMNATIVEFIXUPTYPE;
    895 
    896 /** Native code generator fixup. */
    897 typedef struct IEMNATIVEFIXUP
    898 {
    899     /** Code offset of the fixup location. */
    900     uint32_t    off;
    901     /** The IEMNATIVELABEL this is a fixup for. */
    902     uint16_t    idxLabel;
    903     /** The fixup type (IEMNATIVEFIXUPTYPE). */
    904     uint8_t     enmType;
    905     /** Addend or other data. */
    906     int8_t      offAddend;
    907 } IEMNATIVEFIXUP;
    908 /** Pointer to a native code generator fixup. */
    909 typedef IEMNATIVEFIXUP *PIEMNATIVEFIXUP;
    910 
    911855
    912856/**
     
    920864
    921865
    922 static void iemNativeReInit(PVMCPUCC pVCpu)
    923 {
    924     pVCpu->iem.s.Native.cLabels   = 0;
    925     pVCpu->iem.s.Native.cFixups   = 0;
    926 }
    927 
    928 
    929 static bool iemNativeInit(PVMCPUCC pVCpu)
    930 {
     866/**
     867 * Reinitializes the native recompiler state.
     868 *
     869 * Called before starting a new recompile job.
     870 */
     871static PIEMRECOMPILERSTATE iemNativeReInit(PIEMRECOMPILERSTATE pReNative)
     872{
     873    pReNative->cLabels   = 0;
     874    pReNative->cFixups   = 0;
     875    return pReNative;
     876}
     877
     878
     879/**
     880 * Allocates and initializes the native recompiler state.
     881 *
     882 * This is called the first time an EMT wants to recompile something.
     883 *
     884 * @returns Pointer to the new recompiler state.
     885 * @param   pVCpu   The cross context virtual CPU structure of the calling
     886 *                  thread.
     887 * @thread  EMT(pVCpu)
     888 */
     889static PIEMRECOMPILERSTATE iemNativeInit(PVMCPUCC pVCpu)
     890{
     891    VMCPU_ASSERT_EMT(pVCpu);
     892
     893    PIEMRECOMPILERSTATE pReNative = (PIEMRECOMPILERSTATE)RTMemAllocZ(sizeof(*pReNative));
     894    AssertReturn(pReNative, NULL);
     895
    931896    /*
    932897     * Try allocate all the buffers and stuff we need.
    933898     */
    934     pVCpu->iem.s.Native.pInstrBuf = (PIEMNATIVEINSTR)RTMemAllocZ(_64K);
    935     pVCpu->iem.s.Native.paLabels  = (PIEMNATIVELABEL)RTMemAllocZ(sizeof(IEMNATIVELABEL) * _8K);
    936     pVCpu->iem.s.Native.paFixups  = (PIEMNATIVEFIXUP)RTMemAllocZ(sizeof(IEMNATIVEFIXUP) * _16K);
    937     if (RT_LIKELY(   pVCpu->iem.s.Native.pInstrBuf
    938                   && pVCpu->iem.s.Native.paLabels
    939                   && pVCpu->iem.s.Native.paFixups))
     899    pReNative->pInstrBuf = (PIEMNATIVEINSTR)RTMemAllocZ(_64K);
     900    pReNative->paLabels  = (PIEMNATIVELABEL)RTMemAllocZ(sizeof(IEMNATIVELABEL) * _8K);
     901    pReNative->paFixups  = (PIEMNATIVEFIXUP)RTMemAllocZ(sizeof(IEMNATIVEFIXUP) * _16K);
     902    if (RT_LIKELY(   pReNative->pInstrBuf
     903                  && pReNative->paLabels
     904                  && pReNative->paFixups))
    940905    {
    941906        /*
    942907         * Set the buffer & array sizes on success.
    943908         */
    944         pVCpu->iem.s.Native.cInstrBufAlloc = _64K / sizeof(IEMNATIVEINSTR);
    945         pVCpu->iem.s.Native.cLabelsAlloc   = _8K;
    946         pVCpu->iem.s.Native.cFixupsAlloc   = _16K;
    947         iemNativeReInit(pVCpu);
    948         return true;
    949     }
    950 
    951     /*
    952      * Failed. Cleanup and the reset state.
     909        pReNative->cInstrBufAlloc = _64K / sizeof(IEMNATIVEINSTR);
     910        pReNative->cLabelsAlloc   = _8K;
     911        pReNative->cFixupsAlloc   = _16K;
     912
     913        /*
     914         * Done, just need to save it and reinit it.
     915         */
     916        pVCpu->iem.s.pNativeRecompilerStateR3 = pReNative;
     917        return iemNativeReInit(pReNative);
     918    }
     919
     920    /*
     921     * Failed. Cleanup and return.
    953922     */
    954923    AssertFailed();
    955     RTMemFree(pVCpu->iem.s.Native.pInstrBuf);
    956     RTMemFree(pVCpu->iem.s.Native.paLabels);
    957     RTMemFree(pVCpu->iem.s.Native.paFixups);
    958     pVCpu->iem.s.Native.pInstrBuf = NULL;
    959     pVCpu->iem.s.Native.paLabels  = NULL;
    960     pVCpu->iem.s.Native.paFixups  = NULL;
    961     return false;
    962 }
    963 
    964 
    965 static uint32_t iemNativeMakeLabel(PVMCPUCC pVCpu, IEMNATIVELABELTYPE enmType,
    966                                    uint32_t offWhere = UINT32_MAX, uint16_t uData = 0)
     924    RTMemFree(pReNative->pInstrBuf);
     925    RTMemFree(pReNative->paLabels);
     926    RTMemFree(pReNative->paFixups);
     927    RTMemFree(pReNative);
     928    return NULL;
     929}
     930
     931
     932/**
     933 * Defines a label.
     934 *
     935 * @returns Label ID.
     936 * @param   pReNative   The native recompile state.
     937 * @param   enmType     The label type.
     938 * @param   offWhere    The instruction offset of the label.  UINT32_MAX if the
     939 *                      label is not yet defined (default).
     940 * @param   uData       Data associated with the label. Only applicable to
     941 *                      certain types of labels. Default is zero.
     942 */
     943DECLHIDDEN(uint32_t) iemNativeMakeLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
     944                                        uint32_t offWhere /*= UINT32_MAX*/, uint16_t uData /*= 0*/) RT_NOEXCEPT
    967945{
    968946    /*
    969947     * Do we have the label already?
    970948     */
    971     PIEMNATIVELABEL paLabels = pVCpu->iem.s.Native.paLabels;
    972     uint32_t const  cLabels  = pVCpu->iem.s.Native.cLabels;
     949    PIEMNATIVELABEL paLabels = pReNative->paLabels;
     950    uint32_t const  cLabels  = pReNative->cLabels;
    973951    for (uint32_t i = 0; i < cLabels; i++)
    974952        if (   paLabels[i].enmType == enmType
     
    987965     * Make sure we've got room for another label.
    988966     */
    989     if (RT_LIKELY(cLabels < pVCpu->iem.s.Native.cLabelsAlloc))
     967    if (RT_LIKELY(cLabels < pReNative->cLabelsAlloc))
    990968    { /* likely */ }
    991969    else
    992970    {
    993         uint32_t cNew = pVCpu->iem.s.Native.cLabelsAlloc;
     971        uint32_t cNew = pReNative->cLabelsAlloc;
    994972        AssertReturn(cNew, UINT32_MAX);
    995973        AssertReturn(cLabels == cNew, UINT32_MAX);
     
    998976        paLabels = (PIEMNATIVELABEL)RTMemRealloc(paLabels, cNew * sizeof(paLabels[0]));
    999977        AssertReturn(paLabels, UINT32_MAX);
    1000         pVCpu->iem.s.Native.paLabels     = paLabels;
    1001         pVCpu->iem.s.Native.cLabelsAlloc = cNew;
     978        pReNative->paLabels     = paLabels;
     979        pReNative->cLabelsAlloc = cNew;
    1002980    }
    1003981
     
    1008986    paLabels[cLabels].enmType = enmType;
    1009987    paLabels[cLabels].uData   = uData;
    1010     pVCpu->iem.s.Native.cLabels = cLabels + 1;
     988    pReNative->cLabels = cLabels + 1;
    1011989    return cLabels;
    1012990}
    1013991
    1014992
    1015 static uint32_t iemNativeFindLabel(PVMCPUCC pVCpu, IEMNATIVELABELTYPE enmType,
    1016                                    uint32_t offWhere = UINT32_MAX, uint16_t uData = 0)
    1017 {
    1018     PIEMNATIVELABEL paLabels = pVCpu->iem.s.Native.paLabels;
    1019     uint32_t const  cLabels  = pVCpu->iem.s.Native.cLabels;
     993/**
     994 * Looks up a label.
     995 *
     996 * @returns Label ID if found, UINT32_MAX if not.
     997 */
     998static uint32_t iemNativeFindLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
     999                                   uint32_t offWhere = UINT32_MAX, uint16_t uData = 0) RT_NOEXCEPT
     1000{
     1001    PIEMNATIVELABEL paLabels = pReNative->paLabels;
     1002    uint32_t const  cLabels  = pReNative->cLabels;
    10201003    for (uint32_t i = 0; i < cLabels; i++)
    10211004        if (   paLabels[i].enmType == enmType
     
    10301013
    10311014
    1032 static bool iemNativeAddFixup(PVMCPUCC pVCpu, uint32_t offWhere, uint32_t idxLabel,
    1033                               IEMNATIVEFIXUPTYPE enmType, int8_t offAddend = 0)
     1015/**
     1016 * Adds a fixup.
     1017 *
     1018 * @returns Success indicator.
     1019 * @param   pReNative   The native recompile state.
     1020 * @param   offWhere    The instruction offset of the fixup location.
     1021 * @param   idxLabel    The target label ID for the fixup.
     1022 * @param   enmType     The fixup type.
     1023 * @param   offAddend   Fixup addend if applicable to the type. Default is 0.
     1024 */
     1025DECLHIDDEN(bool) iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel,
     1026                                   IEMNATIVEFIXUPTYPE enmType, int8_t offAddend /*= 0*/) RT_NOEXCEPT
    10341027{
    10351028    Assert(idxLabel <= UINT16_MAX);
     
    10391032     * Make sure we've room.
    10401033     */
    1041     PIEMNATIVEFIXUP paFixups = pVCpu->iem.s.Native.paFixups;
    1042     uint32_t const  cFixups  = pVCpu->iem.s.Native.cFixups;
    1043     if (RT_LIKELY(cFixups < pVCpu->iem.s.Native.cFixupsAlloc))
     1034    PIEMNATIVEFIXUP paFixups = pReNative->paFixups;
     1035    uint32_t const  cFixups  = pReNative->cFixups;
     1036    if (RT_LIKELY(cFixups < pReNative->cFixupsAlloc))
    10441037    { /* likely */ }
    10451038    else
    10461039    {
    1047         uint32_t cNew = pVCpu->iem.s.Native.cFixupsAlloc;
     1040        uint32_t cNew = pReNative->cFixupsAlloc;
    10481041        AssertReturn(cNew, false);
    10491042        AssertReturn(cFixups == cNew, false);
     
    10521045        paFixups = (PIEMNATIVEFIXUP)RTMemRealloc(paFixups, cNew * sizeof(paFixups[0]));
    10531046        AssertReturn(paFixups, false);
    1054         pVCpu->iem.s.Native.paFixups     = paFixups;
    1055         pVCpu->iem.s.Native.cFixupsAlloc = cNew;
     1047        pReNative->paFixups     = paFixups;
     1048        pReNative->cFixupsAlloc = cNew;
    10561049    }
    10571050
     
    10631056    paFixups[cFixups].enmType   = enmType;
    10641057    paFixups[cFixups].offAddend = offAddend;
    1065     pVCpu->iem.s.Native.cFixups = cFixups + 1;
     1058    pReNative->cFixups = cFixups + 1;
    10661059    return true;
    10671060}
    10681061
    1069 
    1070 static PIEMNATIVEINSTR iemNativeInstrBufEnsureSlow(PVMCPUCC pVCpu, uint32_t off, uint32_t cInstrReq)
     1062/**
     1063 * Slow code path for iemNativeInstrBufEnsure.
     1064 */
     1065DECLHIDDEN(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1066                                                        uint32_t cInstrReq) RT_NOEXCEPT
    10711067{
    10721068    /* Double the buffer size till we meet the request. */
    1073     uint32_t cNew = pVCpu->iem.s.Native.cInstrBufAlloc;
     1069    uint32_t cNew = pReNative->cInstrBufAlloc;
    10741070    AssertReturn(cNew > 0, NULL);
    10751071    do
     
    10801076    AssertReturn(cbNew <= _2M, NULL);
    10811077
    1082     void *pvNew = RTMemRealloc(pVCpu->iem.s.Native.pInstrBuf, cbNew);
     1078    void *pvNew = RTMemRealloc(pReNative->pInstrBuf, cbNew);
    10831079    AssertReturn(pvNew, NULL);
    10841080
    1085     pVCpu->iem.s.Native.cInstrBufAlloc   = cNew;
    1086     return pVCpu->iem.s.Native.pInstrBuf = (PIEMNATIVEINSTR)pvNew;
    1087 }
    1088 
    1089 
    1090 DECL_FORCE_INLINE(PIEMNATIVEINSTR) iemNativeInstrBufEnsure(PVMCPUCC pVCpu, uint32_t off, uint32_t cInstrReq)
    1091 {
    1092     if (RT_LIKELY(off + cInstrReq <= pVCpu->iem.s.Native.cInstrBufAlloc))
    1093         return pVCpu->iem.s.Native.pInstrBuf;
    1094     return iemNativeInstrBufEnsureSlow(pVCpu, off, cInstrReq);
    1095 }
    1096 
    1097 
    1098 /**
    1099  * Emit a simple marker instruction to more easily tell where something starts
    1100  * in the disassembly.
    1101  */
    1102 uint32_t iemNativeEmitMarker(PVMCPUCC pVCpu, uint32_t off)
    1103 {
    1104 #ifdef RT_ARCH_AMD64
    1105     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 1);
    1106     AssertReturn(pbCodeBuf, UINT32_MAX);
    1107     pbCodeBuf[off++] = 0x90;                    /* nop */
    1108 
    1109 #elif RT_ARCH_ARM64
    1110     uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 1);
    1111     pu32CodeBuf[off++] = 0xe503201f;            /* nop? */
    1112 
    1113 #else
    1114 # error "port me"
    1115 #endif
    1116     return off;
    1117 }
    1118 
    1119 
    1120 static uint32_t iemNativeEmitGprZero(PVMCPUCC pVCpu, uint32_t off, uint8_t iGpr)
    1121 {
    1122 #ifdef RT_ARCH_AMD64
    1123     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 3);
    1124     AssertReturn(pbCodeBuf, UINT32_MAX);
    1125     if (iGpr >= 8)                          /* xor gpr32, gpr32 */
    1126         pbCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
    1127     pbCodeBuf[off++] = 0x33;
    1128     pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGpr & 7, iGpr & 7);
    1129 
    1130 #elif RT_ARCH_ARM64
    1131     RT_NOREF(pVCpu, iGpr, uImm64);
    1132     off = UINT32_MAX;
    1133 
    1134 #else
    1135 # error "port me"
    1136 #endif
    1137     RT_NOREF(pVCpu);
    1138     return off;
    1139 }
    1140 
    1141 
    1142 static uint32_t iemNativeEmitLoadGprImm64(PVMCPUCC pVCpu, uint32_t off, uint8_t iGpr, uint64_t uImm64)
    1143 {
    1144     if (!uImm64)
    1145         return iemNativeEmitGprZero(pVCpu, off, iGpr);
    1146 
    1147 #ifdef RT_ARCH_AMD64
    1148     if (uImm64 <= UINT32_MAX)
    1149     {
    1150         /* mov gpr, imm32 */
    1151         uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 6);
    1152         AssertReturn(pbCodeBuf, UINT32_MAX);
    1153         if (iGpr >= 8)
    1154             pbCodeBuf[off++] = X86_OP_REX_B;
    1155         pbCodeBuf[off++] = 0xb8 + (iGpr & 7);
    1156         pbCodeBuf[off++] = RT_BYTE1(uImm64);
    1157         pbCodeBuf[off++] = RT_BYTE2(uImm64);
    1158         pbCodeBuf[off++] = RT_BYTE3(uImm64);
    1159         pbCodeBuf[off++] = RT_BYTE4(uImm64);
    1160     }
    1161     else
    1162     {
    1163         /* mov gpr, imm64 */
    1164         uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 10);
    1165         AssertReturn(pbCodeBuf, UINT32_MAX);
    1166         if (iGpr < 8)
    1167             pbCodeBuf[off++] = X86_OP_REX_W;
    1168         else
    1169             pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_B;
    1170         pbCodeBuf[off++] = 0xb8 + (iGpr & 7);
    1171         pbCodeBuf[off++] = RT_BYTE1(uImm64);
    1172         pbCodeBuf[off++] = RT_BYTE2(uImm64);
    1173         pbCodeBuf[off++] = RT_BYTE3(uImm64);
    1174         pbCodeBuf[off++] = RT_BYTE4(uImm64);
    1175         pbCodeBuf[off++] = RT_BYTE5(uImm64);
    1176         pbCodeBuf[off++] = RT_BYTE6(uImm64);
    1177         pbCodeBuf[off++] = RT_BYTE7(uImm64);
    1178         pbCodeBuf[off++] = RT_BYTE8(uImm64);
    1179     }
    1180 
    1181 #elif RT_ARCH_ARM64
    1182     RT_NOREF(pVCpu, iGpr, uImm64);
    1183     off = UINT32_MAX;
    1184 
    1185 #else
    1186 # error "port me"
    1187 #endif
    1188     return off;
    1189 }
    1190 
    1191 
    1192 static uint32_t iemNativeEmitLoadGprFromVCpuU32(PVMCPUCC pVCpu, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
    1193 {
    1194 #ifdef RT_ARCH_AMD64
    1195     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 7);
    1196     AssertReturn(pbCodeBuf, UINT32_MAX);
    1197 
    1198     /* mov reg32, mem32 */
    1199     if (iGpr >= 8)
    1200         pbCodeBuf[off++] = X86_OP_REX_R;
    1201     pbCodeBuf[off++] = 0x8b;
    1202     if (offVCpu < 128)
    1203     {
    1204         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGpr & 7, X86_GREG_xBX);
    1205         pbCodeBuf[off++] = (uint8_t)offVCpu;
    1206     }
    1207     else
    1208     {
    1209         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, iGpr & 7, X86_GREG_xBX);
    1210         pbCodeBuf[off++] = RT_BYTE1(offVCpu);
    1211         pbCodeBuf[off++] = RT_BYTE2(offVCpu);
    1212         pbCodeBuf[off++] = RT_BYTE3(offVCpu);
    1213         pbCodeBuf[off++] = RT_BYTE4(offVCpu);
    1214     }
    1215 
    1216 #elif RT_ARCH_ARM64
    1217     RT_NOREF(pVCpu, idxInstr);
    1218     off = UINT32_MAX;
    1219 
    1220 #else
    1221 # error "port me"
    1222 #endif
    1223     return off;
    1224 }
    1225 
    1226 
    1227 static uint32_t iemNativeEmitLoadGprFromGpr(PVMCPUCC pVCpu, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
    1228 {
    1229 #ifdef RT_ARCH_AMD64
    1230     /* mov gprdst, gprsrc */
    1231     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 3);
    1232     AssertReturn(pbCodeBuf, UINT32_MAX);
    1233     if ((iGprDst | iGprSrc) >= 8)
    1234         pbCodeBuf[off++] = iGprDst < 8  ? X86_OP_REX_W | X86_OP_REX_B
    1235                          : iGprSrc >= 8 ? X86_OP_REX_W | X86_OP_REX_R | X86_OP_REX_B
    1236                          :                X86_OP_REX_W | X86_OP_REX_R;
    1237     else
    1238         pbCodeBuf[off++] = X86_OP_REX_W;
    1239     pbCodeBuf[off++] = 0x8b;
    1240     pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
    1241 
    1242 #elif RT_ARCH_ARM64
    1243     RT_NOREF(pVCpu, iGprDst, iGprSrc);
    1244     off = UINT32_MAX;
    1245 
    1246 #else
    1247 # error "port me"
    1248 #endif
    1249     return off;
    1250 }
    1251 
    1252 #ifdef RT_ARCH_AMD64
    1253 /**
    1254  * Common bit of iemNativeEmitLoadGprByBp and friends.
    1255  */
    1256 DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByBpDisp(uint8_t *pbCodeBuf, uint32_t off, uint8_t iGprReg, int32_t offDisp)
    1257 {
    1258     if (offDisp < 128 && offDisp >= -128)
    1259     {
    1260         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, X86_GREG_xBP);
    1261         pbCodeBuf[off++] = (uint8_t)(int8_t)offDisp;
    1262     }
    1263     else
    1264     {
    1265         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, iGprReg & 7, X86_GREG_xBP);
    1266         pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
    1267         pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
    1268         pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
    1269         pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
    1270     }
    1271     return off;
    1272 }
    1273 #endif
    1274 
    1275 
    1276 #ifdef RT_ARCH_AMD64
    1277 /**
    1278  * Emits a 64-bit GRP load instruction with an BP relative source address.
    1279  */
    1280 static uint32_t iemNativeEmitLoadGprByBp(PVMCPUCC pVCpu, uint32_t off, uint8_t iGprDst, int32_t offDisp)
    1281 {
    1282     /* mov gprdst, qword [rbp + offDisp]  */
    1283     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 7);
    1284     if (iGprDst < 8)
    1285         pbCodeBuf[off++] = X86_OP_REX_W;
    1286     else
    1287         pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
    1288     pbCodeBuf[off++] = 0x8b;
    1289     return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp);
    1290 }
    1291 #endif
    1292 
    1293 
    1294 #ifdef RT_ARCH_AMD64
    1295 /**
    1296  * Emits a 32-bit GRP load instruction with an BP relative source address.
    1297  */
    1298 static uint32_t iemNativeEmitLoadGprByBpU32(PVMCPUCC pVCpu, uint32_t off, uint8_t iGprDst, int32_t offDisp)
    1299 {
    1300     /* mov gprdst, dword [rbp + offDisp]  */
    1301     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 7);
    1302     if (iGprDst >= 8)
    1303         pbCodeBuf[off++] = X86_OP_REX_R;
    1304     pbCodeBuf[off++] = 0x8b;
    1305     return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp);
    1306 }
    1307 #endif
    1308 
    1309 
    1310 #ifdef RT_ARCH_AMD64
    1311 /**
    1312  * Emits a load effective address to a GRP with an BP relative source address.
    1313  */
    1314 static uint32_t iemNativeEmitLeaGrpByBp(PVMCPUCC pVCpu, uint32_t off, uint8_t iGprDst, int32_t offDisp)
    1315 {
    1316     /* lea gprdst, [rbp + offDisp] */
    1317     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 7);
    1318     if (iGprDst < 8)
    1319         pbCodeBuf[off++] = X86_OP_REX_W;
    1320     else
    1321         pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
    1322     pbCodeBuf[off++] = 0x8d;
    1323     return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp);
    1324 }
    1325 #endif
    1326 
    1327 
    1328 #ifdef RT_ARCH_AMD64
    1329 /**
    1330  * Emits a 64-bit GPR store with an BP relative destination address.
    1331  */
    1332 static uint32_t iemNativeEmitStoreGprByBp(PVMCPUCC pVCpu, uint32_t off, int32_t offDisp, uint8_t iGprSrc)
    1333 {
    1334     /* mov qword [rbp + offDisp], gprdst */
    1335     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 7);
    1336     if (iGprSrc < 8)
    1337         pbCodeBuf[off++] = X86_OP_REX_W;
    1338     else
    1339         pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
    1340     pbCodeBuf[off++] = 0x89;
    1341     return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprSrc, offDisp);
    1342 }
    1343 #endif
    1344 
    1345 
    1346 #ifdef RT_ARCH_AMD64
    1347 /**
    1348  * Emits a 64-bit GPR subtract with a signed immediate subtrahend.
    1349  */
    1350 static uint32_t iemNativeEmitSubGprImm(PVMCPUCC pVCpu, uint32_t off, uint8_t iGprDst, int32_t iSubtrahend)
    1351 {
    1352     /* sub gprdst, imm8/imm32 */
    1353     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 7);
    1354     if (iGprDst < 7)
    1355         pbCodeBuf[off++] = X86_OP_REX_W;
    1356     else
    1357         pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_B;
    1358     if (iSubtrahend < 128 && iSubtrahend >= -128)
    1359     {
    1360         pbCodeBuf[off++] = 0x83;
    1361         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
    1362         pbCodeBuf[off++] = (uint8_t)iSubtrahend;
    1363     }
    1364     else
    1365     {
    1366         pbCodeBuf[off++] = 0x81;
    1367         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
    1368         pbCodeBuf[off++] = RT_BYTE1(iSubtrahend);
    1369         pbCodeBuf[off++] = RT_BYTE2(iSubtrahend);
    1370         pbCodeBuf[off++] = RT_BYTE3(iSubtrahend);
    1371         pbCodeBuf[off++] = RT_BYTE4(iSubtrahend);
    1372     }
    1373     return off;
    1374 }
    1375 #endif
     1081    pReNative->cInstrBufAlloc   = cNew;
     1082    return pReNative->pInstrBuf = (PIEMNATIVEINSTR)pvNew;
     1083}
    13761084
    13771085
     
    13801088 * from the code if either are non-zero.
    13811089 */
    1382 static uint32_t iemNativeEmitCheckCallRetAndPassUp(PVMCPUCC pVCpu, uint32_t off, uint8_t idxInstr)
     1090DECLHIDDEN(uint32_t) iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1091                                                        uint8_t idxInstr) RT_NOEXCEPT
    13831092{
    13841093#ifdef RT_ARCH_AMD64
     
    13861095
    13871096    /* edx = rcPassUp */
    1388     off = iemNativeEmitLoadGprFromVCpuU32(pVCpu, off, X86_GREG_xDX, RT_UOFFSETOF(VMCPUCC, iem.s.rcPassUp));
     1097    off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, X86_GREG_xDX, RT_UOFFSETOF(VMCPUCC, iem.s.rcPassUp));
    13891098    AssertReturn(off != UINT32_MAX, UINT32_MAX);
    13901099
    1391     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 10);
     1100    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
    13921101    AssertReturn(pbCodeBuf, UINT32_MAX);
    13931102
     
    14021111    pbCodeBuf[off++] = 0x0f;                    /* jnz rel32 */
    14031112    pbCodeBuf[off++] = 0x85;
    1404     uint32_t const idxLabel = iemNativeMakeLabel(pVCpu, kIemNativeLabelType_NonZeroRetOrPassUp);
     1113    uint32_t const idxLabel = iemNativeMakeLabel(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
    14051114    AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
    1406     AssertReturn(iemNativeAddFixup(pVCpu, off, idxLabel, kIemNativeFixupType_Rel32, -4), UINT32_MAX);
     1115    AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4), UINT32_MAX);
    14071116    pbCodeBuf[off++] = 0x00;
    14081117    pbCodeBuf[off++] = 0x00;
     
    14131122
    14141123#elif RT_ARCH_ARM64
    1415     RT_NOREF(pVCpu, idxInstr);
     1124    RT_NOREF(pReNative, idxInstr);
    14161125    off = UINT32_MAX;
    14171126
     
    14261135 * Emits a call to a threaded worker function.
    14271136 */
    1428 static uint32_t iemNativeEmitThreadedCall(PVMCPUCC pVCpu, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
     1137static int32_t iemNativeEmitThreadedCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
    14291138{
    14301139#ifdef VBOX_STRICT
    1431     off = iemNativeEmitMarker(pVCpu, off);
     1140    off = iemNativeEmitMarker(pReNative, off);
    14321141    AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14331142#endif
     
    14381147# ifdef RT_OS_WINDOWS
    14391148#  ifndef VBOXSTRICTRC_STRICT_ENABLED
    1440     off = iemNativeEmitLoadGprFromGpr(pVCpu, off, X86_GREG_xCX, X86_GREG_xBX);
     1149    off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xCX, X86_GREG_xBX);
    14411150    AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14421151    if (cParams > 0)
    14431152    {
    1444         off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_xDX, pCallEntry->auParams[0]);
     1153        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xDX, pCallEntry->auParams[0]);
    14451154        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14461155    }
    14471156    if (cParams > 1)
    14481157    {
    1449         off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_x8, pCallEntry->auParams[1]);
     1158        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x8, pCallEntry->auParams[1]);
    14501159        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14511160    }
    14521161    if (cParams > 2)
    14531162    {
    1454         off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_x9, pCallEntry->auParams[2]);
     1163        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x9, pCallEntry->auParams[2]);
    14551164        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14561165    }
    14571166#  else  /* VBOXSTRICTRC: Returned via hidden parameter. Sigh. */
    1458     off = iemNativeEmitLoadGprFromGpr(pVCpu, off, X86_GREG_xDX, X86_GREG_xBX);
     1167    off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xBX);
    14591168    AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14601169    if (cParams > 0)
    14611170    {
    1462         off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_x8, pCallEntry->auParams[0]);
     1171        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x8, pCallEntry->auParams[0]);
    14631172        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14641173    }
    14651174    if (cParams > 1)
    14661175    {
    1467         off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_x9, pCallEntry->auParams[1]);
     1176        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x9, pCallEntry->auParams[1]);
    14681177        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14691178    }
    14701179    if (cParams > 2)
    14711180    {
    1472         off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_x10, pCallEntry->auParams[2]);
     1181        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x10, pCallEntry->auParams[2]);
    14731182        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14741183    }
    1475     off = iemNativeEmitStoreGprByBp(pVCpu, off, IEMNATIVE_FP_OFF_STACK_ARG0, X86_GREG_x10);
     1184    off = iemNativeEmitStoreGprByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, X86_GREG_x10);
    14761185    AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1477     off = iemNativeEmitLeaGrpByBp(pVCpu, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
     1186    off = iemNativeEmitLeaGrpByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
    14781187    AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14791188#  endif /* VBOXSTRICTRC_STRICT_ENABLED */
    14801189# else
    1481     off = iemNativeEmitLoadGprFromGpr(pVCpu, off, X86_GREG_xDI, X86_GREG_xBX);
     1190    off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDI, X86_GREG_xBX);
    14821191    AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14831192    if (cParams > 0)
    14841193    {
    1485         off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_xSI, pCallEntry->auParams[0]);
     1194        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xSI, pCallEntry->auParams[0]);
    14861195        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14871196    }
    14881197    if (cParams > 1)
    14891198    {
    1490         off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_xDX, pCallEntry->auParams[1]);
     1199        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xDX, pCallEntry->auParams[1]);
    14911200        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14921201    }
    14931202    if (cParams > 2)
    14941203    {
    1495         off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_xCX, pCallEntry->auParams[2]);
     1204        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xCX, pCallEntry->auParams[2]);
    14961205        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    14971206    }
    14981207# endif
    1499     off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_xAX, (uintptr_t)g_apfnIemThreadedFunctions[pCallEntry->enmFunction]);
     1208    off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, (uintptr_t)g_apfnIemThreadedFunctions[pCallEntry->enmFunction]);
    15001209    AssertReturn(off != UINT32_MAX, UINT32_MAX);
    15011210
    1502     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 2);
     1211    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
    15031212    AssertReturn(pbCodeBuf, UINT32_MAX);
    15041213    pbCodeBuf[off++] = 0xff;                    /* call rax */
     
    15061215
    15071216# if defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS)
    1508     off = iemNativeEmitLoadGprByBpU32(pVCpu, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
     1217    off = iemNativeEmitLoadGprByBpU32(pReNative, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
    15091218# endif
    15101219
    15111220    /* Check the status code. */
    1512     off = iemNativeEmitCheckCallRetAndPassUp(pVCpu, off, pCallEntry->idxInstr);
     1221    off = iemNativeEmitCheckCallRetAndPassUp(pReNative, off, pCallEntry->idxInstr);
    15131222    AssertReturn(off != UINT32_MAX, off);
    15141223
    15151224
    15161225#elif RT_ARCH_ARM64
    1517     RT_NOREF(pVCpu, pCallEntry);
     1226    RT_NOREF(pReNative, pCallEntry);
    15181227    off = UINT32_MAX;
    15191228
     
    15281237 * Emits a standard epilog.
    15291238 */
    1530 static uint32_t iemNativeEmitEpilog(PVMCPUCC pVCpu, uint32_t off)
     1239static uint32_t iemNativeEmitEpilog(PIEMRECOMPILERSTATE pReNative, uint32_t off)
    15311240{
    15321241#ifdef RT_ARCH_AMD64
    1533     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 20);
     1242    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
    15341243    AssertReturn(pbCodeBuf, UINT32_MAX);
    15351244
     
    15431252     * Define label for common return point.
    15441253     */
    1545     uint32_t const idxReturn = iemNativeMakeLabel(pVCpu, kIemNativeLabelType_Return, off);
     1254    uint32_t const idxReturn = iemNativeMakeLabel(pReNative, kIemNativeLabelType_Return, off);
    15461255    AssertReturn(idxReturn != UINT32_MAX, UINT32_MAX);
    15471256
     
    15731282     * Generate the rc + rcPassUp fiddling code if needed.
    15741283     */
    1575     uint32_t idxLabel = iemNativeFindLabel(pVCpu, kIemNativeLabelType_NonZeroRetOrPassUp);
     1284    uint32_t idxLabel = iemNativeFindLabel(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
    15761285    if (idxLabel != UINT32_MAX)
    15771286    {
    1578         Assert(pVCpu->iem.s.Native.paLabels[idxLabel].off == UINT32_MAX);
    1579         pVCpu->iem.s.Native.paLabels[idxLabel].off = off;
     1287        Assert(pReNative->paLabels[idxLabel].off == UINT32_MAX);
     1288        pReNative->paLabels[idxLabel].off = off;
    15801289
    15811290        /* Call helper and jump to return point. */
    15821291# ifdef RT_OS_WINDOWS
    1583         off = iemNativeEmitLoadGprFromGpr(pVCpu, off, X86_GREG_x8,  X86_GREG_xCX); /* cl = instruction number */
     1292        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_x8,  X86_GREG_xCX); /* cl = instruction number */
    15841293        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1585         off = iemNativeEmitLoadGprFromGpr(pVCpu, off, X86_GREG_xCX, X86_GREG_xBX);
     1294        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xCX, X86_GREG_xBX);
    15861295        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1587         off = iemNativeEmitLoadGprFromGpr(pVCpu, off, X86_GREG_xDX, X86_GREG_xAX);
     1296        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xAX);
    15881297        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    15891298# else
    1590         off = iemNativeEmitLoadGprFromGpr(pVCpu, off, X86_GREG_xDI, X86_GREG_xBX);
     1299        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDI, X86_GREG_xBX);
    15911300        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1592         off = iemNativeEmitLoadGprFromGpr(pVCpu, off, X86_GREG_xSI, X86_GREG_xAX);
     1301        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xSI, X86_GREG_xAX);
    15931302        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1594         off = iemNativeEmitLoadGprFromGpr(pVCpu, off, X86_GREG_xDX, X86_GREG_xCX); /* cl = instruction number */
     1303        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xCX); /* cl = instruction number */
    15951304        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    15961305# endif
    1597         off = iemNativeEmitLoadGprImm64(pVCpu, off, X86_GREG_xAX, (uintptr_t)iemNativeHlpExecStatusCodeFiddling);
     1306        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, (uintptr_t)iemNativeHlpExecStatusCodeFiddling);
    15981307        AssertReturn(off != UINT32_MAX, UINT32_MAX);
    15991308
    1600         pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 10);
     1309        pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
    16011310        AssertReturn(pbCodeBuf, UINT32_MAX);
    16021311        pbCodeBuf[off++] = 0xff;                    /* call rax */
     
    16041313
    16051314        /* Jump to common return point. */
    1606         uint32_t offRel = pVCpu->iem.s.Native.paLabels[idxReturn].off - (off + 2);
     1315        uint32_t offRel = pReNative->paLabels[idxReturn].off - (off + 2);
    16071316        if (-(int32_t)offRel <= 127)
    16081317        {
     
    16241333
    16251334#elif RT_ARCH_ARM64
    1626     RT_NOREF(pVCpu);
     1335    RT_NOREF(pReNative);
    16271336    off = UINT32_MAX;
    16281337
     
    16371346 * Emits a standard prolog.
    16381347 */
    1639 static uint32_t iemNativeEmitProlog(PVMCPUCC pVCpu, uint32_t off)
     1348static uint32_t iemNativeEmitProlog(PIEMRECOMPILERSTATE pReNative, uint32_t off)
    16401349{
    16411350#ifdef RT_ARCH_AMD64
     
    16431352     * Set up a regular xBP stack frame, pushing all non-volatile GPRs,
    16441353     * reserving 64 bytes for stack variables plus 4 non-register argument
    1645      * slots.  Fixed register assignment: xBX = pVCpu;
     1354     * slots.  Fixed register assignment: xBX = pReNative;
    16461355     *
    16471356     * Since we always do the same register spilling, we can use the same
    16481357     * unwind description for all the code.
    16491358     */
    1650     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pVCpu, off, 32);
     1359    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
    16511360    AssertReturn(pbCodeBuf, UINT32_MAX);
    16521361    pbCodeBuf[off++] = 0x50 + X86_GREG_xBP;     /* push rbp */
     
    16751384    pbCodeBuf[off++] = 0x50 + X86_GREG_x15 - 8;
    16761385
    1677     off = iemNativeEmitSubGprImm(pVCpu, off,    /* sub rsp, byte 28h */
     1386    off = iemNativeEmitSubGprImm(pReNative, off,    /* sub rsp, byte 28h */
    16781387                                 X86_GREG_xSP,
    16791388                                   IEMNATIVE_FRAME_ALIGN_SIZE
     
    16861395
    16871396#elif RT_ARCH_ARM64
    1688     RT_NOREF(pVCpu);
     1397    RT_NOREF(pReNative);
    16891398    off = UINT32_MAX;
    16901399
     
    17121421     * we just need to reset it before using it again.
    17131422     */
    1714     if (RT_LIKELY(pVCpu->iem.s.Native.pInstrBuf))
    1715         iemNativeReInit(pVCpu);
     1423    PIEMRECOMPILERSTATE pReNative = pVCpu->iem.s.pNativeRecompilerStateR3;
     1424    if (RT_LIKELY(pReNative))
     1425        iemNativeReInit(pReNative);
    17161426    else
    1717         AssertReturn(iemNativeInit(pVCpu), pTb);
    1718 
    1719     /*
    1720      * Emit prolog code (fixed atm).
    1721      */
    1722     uint32_t off = iemNativeEmitProlog(pVCpu, 0);
     1427    {
     1428        pReNative = iemNativeInit(pVCpu);
     1429        AssertReturn(pReNative, pTb);
     1430    }
     1431
     1432    /*
     1433     * Emit prolog code (fixed).
     1434     */
     1435    uint32_t off = iemNativeEmitProlog(pReNative, 0);
    17231436    AssertReturn(off != UINT32_MAX, pTb);
    17241437
     
    17301443    while (cCallsLeft-- > 0)
    17311444    {
    1732         off = iemNativeEmitThreadedCall(pVCpu, off, pCallEntry);
     1445        off = iemNativeEmitThreadedCall(pReNative, off, pCallEntry);
    17331446        AssertReturn(off != UINT32_MAX, pTb);
    17341447
     
    17391452     * Emit the epilog code.
    17401453     */
    1741     off = iemNativeEmitEpilog(pVCpu, off);
     1454    off = iemNativeEmitEpilog(pReNative, off);
    17421455    AssertReturn(off != UINT32_MAX, pTb);
    17431456
     
     17451458     * Make sure all labels have been defined.
    17461459     */
    1747     PIEMNATIVELABEL const paLabels = pVCpu->iem.s.Native.paLabels;
     1460    PIEMNATIVELABEL const paLabels = pReNative->paLabels;
    17481461#ifdef VBOX_STRICT
    1749     uint32_t const        cLabels  = pVCpu->iem.s.Native.cLabels;
     1462    uint32_t const        cLabels  = pReNative->cLabels;
    17501463    for (uint32_t i = 0; i < cLabels; i++)
    17511464        AssertMsgReturn(paLabels[i].off < off, ("i=%d enmType=%d\n", i, paLabels[i].enmType), pTb);
     
    17611474    PIEMNATIVEINSTR const paFinalInstrBuf = (PIEMNATIVEINSTR)iemExecMemAllocatorAlloc(pVCpu, off * sizeof(IEMNATIVEINSTR));
    17621475    AssertReturn(paFinalInstrBuf, pTb);
    1763     memcpy(paFinalInstrBuf, pVCpu->iem.s.Native.pInstrBuf, off * sizeof(paFinalInstrBuf[0]));
     1476    memcpy(paFinalInstrBuf, pReNative->pInstrBuf, off * sizeof(paFinalInstrBuf[0]));
    17641477
    17651478    /*
    17661479     * Apply fixups.
    17671480     */
    1768     PIEMNATIVEFIXUP const paFixups   = pVCpu->iem.s.Native.paFixups;
    1769     uint32_t const        cFixups    = pVCpu->iem.s.Native.cFixups;
     1481    PIEMNATIVEFIXUP const paFixups   = pReNative->paFixups;
     1482    uint32_t const        cFixups    = pReNative->cFixups;
    17701483    for (uint32_t i = 0; i < cFixups; i++)
    17711484    {
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r101163 r101203  
    14281428    /** Pointer to the ring-3 executable memory allocator for this EMT. */
    14291429    R3PTRTYPE(struct IEMEXECMEMALLOCATOR *) pExecMemAllocatorR3;
    1430 
    1431     /** Native recompiler state for ring-3. */
    1432     struct IEMRECOMPILERSTATE
    1433     {
    1434         /** Size of the buffer that pbNativeRecompileBufR3 points to in
    1435          * IEMNATIVEINSTR units. */
    1436         uint32_t                            cInstrBufAlloc;
    1437         uint32_t                            uPadding; /* We don't keep track of this here... */
    1438         /** Fixed temporary code buffer for native recompilation. */
    1439         R3PTRTYPE(PIEMNATIVEINSTR)          pInstrBuf;
    1440 
    1441         /** Actual number of labels in paLabels. */
    1442         uint32_t                            cLabels;
    1443         /** Max number of entries allowed in paLabels before reallocating it. */
    1444         uint32_t                            cLabelsAlloc;
    1445         /** Labels defined while recompiling (referenced by fixups). */
    1446         R3PTRTYPE(struct IEMNATIVELABEL *)  paLabels;
    1447 
    1448         /** Actual number of fixups paFixups. */
    1449         uint32_t                            cFixups;
    1450         /** Max number of entries allowed in paFixups before reallocating it. */
    1451         uint32_t                            cFixupsAlloc;
    1452         /** Buffer used by the recompiler for recording fixups when generating code. */
    1453         R3PTRTYPE(struct IEMNATIVEFIXUP *)  paFixups;
    1454     } Native;
    1455 
    1456 //    /* Alignment. */
    1457 //    uint64_t                auAlignment10[1];
     1430    /** Pointer to the native recompiler state for ring-3. */
     1431    R3PTRTYPE(struct IEMRECOMPILERSTATE *)  pNativeRecompilerStateR3;
     1432    /** Alignment padding. */
     1433    uint64_t                auAlignment10[5];
    14581434    /** Statistics: Times TB execution was broken off before reaching the end. */
    14591435    STAMCOUNTER             StatTbExecBreaks;
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette