Changeset 101682 in vbox
Timestamp: Oct 31, 2023 12:18:44 PM (15 months ago)
Location: trunk
Files: 4 edited
Legend: unchanged context lines carry no marker; '+' marks added lines, '-' marks removed lines; a lone '…' marks elided unchanged code between hunks.
trunk/include/VBox/err.h (r101035 → r101682)

  /** Recompiler: Translation block allocation failed. */
  #define VERR_IEM_TB_ALLOC_FAILED                    (-5312)
+ /** Recompiler: Too deeply nested conditionals. */
+ #define VERR_IEM_COND_TOO_DEEPLY_NESTED             (-5313)
+ /** Recompiler: Failed to reconcile the register/variable state on endif. */
+ #define VERR_IEM_COND_ENDIF_RECONCILIATION_FAILED   (-5314)
+ /** Recompiler: Failed to allocate more memory for debug info. */
+ #define VERR_IEM_DBGINFO_OUT_OF_MEMORY              (-5315)
+ /** Recompiler: Debug info internal processing error \#1. */
+ #define VERR_IEM_DBGINFO_IPE_1                      (-5316)
+ /** Recompiler: Debug info internal processing error \#2. */
+ #define VERR_IEM_DBGINFO_IPE_2                      (-5317)
+ /** Recompiler: Fixup internal processing error \#1. */
+ #define VERR_IEM_FIXUP_IPE_1                        (-5318)
+ /** Recompiler: Too many fixups. */
+ #define VERR_IEM_FIXUP_TOO_MANY                     (-5319)
+ /** Recompiler: Out of memory. */
+ #define VERR_IEM_FIXUP_OUT_OF_MEMORY                (-5320)
+ /** Recompiler: Hit instruction buffer size limit. */
+ #define VERR_IEM_INSTR_BUF_TOO_LARGE                (-5321)
+ /** Recompiler: Out of memory for the instruction buffer (regular heap). */
+ #define VERR_IEM_INSTR_BUF_OUT_OF_MEMORY            (-5322)
+ /** Recompiler: Too many labels. */
+ #define VERR_IEM_LABEL_TOO_MANY                     (-5323)
+ /** Recompiler: Out of memory for labels. */
+ #define VERR_IEM_LABEL_OUT_OF_MEMORY                (-5324)
+ /** Recompiler: Label internal processing error \#1. */
+ #define VERR_IEM_LABEL_IPE_1                        (-5325)
+ /** Recompiler: Label internal processing error \#2. */
+ #define VERR_IEM_LABEL_IPE_2                        (-5326)
+ /** Recompiler: Label internal processing error \#3. */
+ #define VERR_IEM_LABEL_IPE_3                        (-5327)
+ /** Recompiler: Label internal processing error \#4. */
+ #define VERR_IEM_LABEL_IPE_4                        (-5328)
+ /** Recompiler: Label internal processing error \#5. */
+ #define VERR_IEM_LABEL_IPE_5                        (-5329)
+ /** Recompiler: Label internal processing error \#6. */
+ #define VERR_IEM_LABEL_IPE_6                        (-5330)
+ /** Recompiler: Label internal processing error \#7. */
+ #define VERR_IEM_LABEL_IPE_7                        (-5331)
+
+ /** Recompiler: Out of host register. */
+ #define VERR_IEM_REG_OUT_OF_REGISTERS               (-5340)
+ /** Recompiler: No temporary host register available. */
+ #define VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP          (-5341)
+ /** Recompiler: Register allocator internal processing error \#1. */
+ #define VERR_IEM_REG_IPE_1                          (-5342)
+ /** Recompiler: Register allocator internal processing error \#2. */
+ #define VERR_IEM_REG_IPE_2                          (-5343)
+ /** Recompiler: Register allocator internal processing error \#3. */
+ #define VERR_IEM_REG_IPE_3                          (-5344)
+ /** Recompiler: Register allocator internal processing error \#4. */
+ #define VERR_IEM_REG_IPE_4                          (-5345)
+ /** Recompiler: Register allocator internal processing error \#5. */
+ #define VERR_IEM_REG_IPE_5                          (-5346)
+ /** Recompiler: Register allocator internal processing error \#6. */
+ #define VERR_IEM_REG_IPE_6                          (-5347)
+ /** Recompiler: Register allocator internal processing error \#7. */
+ #define VERR_IEM_REG_IPE_7                          (-5348)
+ /** Recompiler: Register allocator internal processing error \#8. */
+ #define VERR_IEM_REG_IPE_8                          (-5349)
+
+ /** Recompiler: Unimplemented case. */
+ #define VERR_IEM_EMIT_CASE_NOT_IMPLEMENTED_1        (-5360)

  /** Restart the current instruction. For testing only. */
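All of these codes back the recompiler's new longjmp-based error handling: rather than returning a failure indicator from every helper, an emitter calls IEMNATIVE_DO_LONGJMP(pReNative, rc) (see the .cpp diff below) and control lands back at the recompiler's entry point carrying the status code. The standalone sketch below only illustrates that control-flow shape with plain setjmp/longjmp; every type and function name in it is invented for illustration, and the real VBox plumbing is considerably more involved.

#include <csetjmp>
#include <cstdint>
#include <cstdio>

/* Stand-ins for the real IPRT/VMM types and codes (assumptions for this sketch). */
#define VERR_IEM_LABEL_TOO_MANY (-5323)
struct RECOMPSTATE { std::jmp_buf JmpBuf; };

/* Analogous in spirit to IEMNATIVE_DO_LONGJMP: abandon recompilation with a status code. */
[[noreturn]] static void doLongJmp(RECOMPSTATE *pState, int rc) { std::longjmp(pState->JmpBuf, rc); }

static uint32_t labelCreate(RECOMPSTATE *pState, uint32_t cLabels)
{
    if (cLabels >= UINT16_MAX)            /* IEMNATIVEFIXUP::idxLabel is 16 bits in the real code. */
        doLongJmp(pState, VERR_IEM_LABEL_TOO_MANY); /* no error return value needed */
    return cLabels;                       /* new label index */
}

int main()
{
    RECOMPSTATE State;
    int rc = setjmp(State.JmpBuf);        /* "catch" site at the recompiler entry point */
    if (rc == 0)
        std::printf("label %u\n", labelCreate(&State, 42));
    else
        std::printf("recompilation failed: %d\n", rc);
}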
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (r101661 → r101682)

  #ifdef VBOX_STRICT
  static uint32_t iemNativeEmitGuestRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                                 uint8_t idxReg, IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT;
+                                                 uint8_t idxReg, IEMNATIVEGSTREG enmGstReg);
  #endif
  #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
- static bool iemNativeDbgInfoAddNativeOffset(PIEMRECOMPILERSTATE pReNative, uint32_t off) RT_NOEXCEPT;
- static bool iemNativeDbgInfoAddLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, uint16_t uData) RT_NOEXCEPT;
+ static void iemNativeDbgInfoAddNativeOffset(PIEMRECOMPILERSTATE pReNative, uint32_t off);
+ static void iemNativeDbgInfoAddLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, uint16_t uData);
  #endif
…
   * call iemNativeLabelDefine() later to set it.
   *
-  * @returns Label ID.
+  * @returns Label ID. Throws VBox status code on failure, so no need to check
+  *          the return value.
   * @param   pReNative   The native recompile state.
   * @param   enmType     The label type.
…
   *                      certain type of labels. Default is zero.
   */
- DECLHIDDEN(uint32_t) iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
-                                           uint32_t offWhere /*= UINT32_MAX*/, uint16_t uData /*= 0*/) RT_NOEXCEPT
+ DECL_HIDDEN_THROW(uint32_t)
+ iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
+                      uint32_t offWhere /*= UINT32_MAX*/, uint16_t uData /*= 0*/)
  {
      /*
…
      {
  #ifdef VBOX_STRICT
-         AssertReturn(uData == 0, UINT32_MAX);
-         AssertReturn(offWhere == UINT32_MAX, UINT32_MAX);
+         AssertStmt(uData == 0, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
+         AssertStmt(offWhere == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_1));
  #endif
-         AssertReturn(paLabels[i].off == UINT32_MAX, UINT32_MAX);
+         AssertStmt(paLabels[i].off == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_2));
          return i;
      }
…
      {
          uint32_t cNew = pReNative->cLabelsAlloc;
-         AssertReturn(cNew, UINT32_MAX);
-         AssertReturn(cLabels == cNew, UINT32_MAX);
+         AssertStmt(cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_3));
+         AssertStmt(cLabels == cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_3));
          cNew *= 2;
-         AssertReturn(cNew <= _64K, UINT32_MAX); /* IEMNATIVEFIXUP::idxLabel type restrict this */
+         AssertStmt(cNew <= _64K, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_TOO_MANY)); /* IEMNATIVEFIXUP::idxLabel type restrict this */
          paLabels = (PIEMNATIVELABEL)RTMemRealloc(paLabels, cNew * sizeof(paLabels[0]));
-         AssertReturn(paLabels, UINT32_MAX);
+         AssertStmt(paLabels, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_OUT_OF_MEMORY));
          pReNative->paLabels     = paLabels;
          pReNative->cLabelsAlloc = cNew;
      }
1802 1804 */ 1803 DECL HIDDEN(void) iemNativeLabelDefine(PIEMRECOMPILERSTATE pReNative, uint32_t idxLabel, uint32_t offWhere) RT_NOEXCEPT1804 { 1805 Assert ReturnVoid(idxLabel < pReNative->cLabels);1805 DECL_HIDDEN_THROW(void) iemNativeLabelDefine(PIEMRECOMPILERSTATE pReNative, uint32_t idxLabel, uint32_t offWhere) 1806 { 1807 AssertStmt(idxLabel < pReNative->cLabels, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_4)); 1806 1808 PIEMNATIVELABEL const pLabel = &pReNative->paLabels[idxLabel]; 1807 Assert ReturnVoid(pLabel->off == UINT32_MAX);1809 AssertStmt(pLabel->off == UINT32_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_5)); 1808 1810 pLabel->off = offWhere; 1809 1811 #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO … … 1839 1841 1840 1842 1841 1842 1843 /** 1843 1844 * Adds a fixup. 1844 1845 * 1845 * @ returns Success indicator.1846 * @throws VBox status code (int) on failure. 1846 1847 * @param pReNative The native recompile state. 1847 1848 * @param offWhere The instruction offset of the fixup location. … … 1850 1851 * @param offAddend Fixup addend if applicable to the type. Default is 0. 1851 1852 */ 1852 DECLHIDDEN(bool) iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel, 1853 IEMNATIVEFIXUPTYPE enmType, int8_t offAddend /*= 0*/) RT_NOEXCEPT 1853 DECL_HIDDEN_THROW(void) 1854 iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel, 1855 IEMNATIVEFIXUPTYPE enmType, int8_t offAddend /*= 0*/) 1854 1856 { 1855 1857 Assert(idxLabel <= UINT16_MAX); … … 1866 1868 { 1867 1869 uint32_t cNew = pReNative->cFixupsAlloc; 1868 Assert Return(cNew, false);1869 Assert Return(cFixups == cNew, false);1870 AssertStmt(cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_IPE_1)); 1871 AssertStmt(cFixups == cNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_IPE_1)); 1870 1872 cNew *= 2; 1871 Assert Return(cNew <= _128K, false);1873 AssertStmt(cNew <= _128K, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_TOO_MANY)); 1872 1874 paFixups = (PIEMNATIVEFIXUP)RTMemRealloc(paFixups, cNew * sizeof(paFixups[0])); 1873 Assert Return(paFixups, false);1875 AssertStmt(paFixups, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_FIXUP_OUT_OF_MEMORY)); 1874 1876 pReNative->paFixups = paFixups; 1875 1877 pReNative->cFixupsAlloc = cNew; … … 1884 1886 paFixups[cFixups].offAddend = offAddend; 1885 1887 pReNative->cFixups = cFixups + 1; 1886 return true; 1887 } 1888 } 1889 1888 1890 1889 1891 /** 1890 1892 * Slow code path for iemNativeInstrBufEnsure. 1891 1893 */ 1892 DECLHIDDEN(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, 1893 uint32_t cInstrReq) RT_NOEXCEPT 1894 DECL_HIDDEN_THROW(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq) 1894 1895 { 1895 1896 /* Double the buffer size till we meet the request. */ … … 1902 1903 uint32_t const cbNew = cNew * sizeof(IEMNATIVEINSTR); 1903 1904 #ifdef RT_ARCH_ARM64 1904 AssertReturn(cbNew <= _1M, NULL); /* Limited by the branch instruction range (18+2 bits). */1905 uint32_t const cbMaxInstrBuf = _1M; /* Limited by the branch instruction range (18+2 bits). 
*/ 1905 1906 #else 1906 AssertReturn(cbNew <= _2M, NULL); 1907 #endif 1907 uint32_t const cbMaxInstrBuf = _2M; 1908 #endif 1909 AssertStmt(cbNew <= cbMaxInstrBuf, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_INSTR_BUF_TOO_LARGE)); 1908 1910 1909 1911 void *pvNew = RTMemRealloc(pReNative->pInstrBuf, cbNew); 1910 Assert Return(pvNew, NULL);1912 AssertStmt(pvNew, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_INSTR_BUF_OUT_OF_MEMORY)); 1911 1913 1912 1914 pReNative->cInstrBufAlloc = cNew; … … 1918 1920 /** 1919 1921 * Grows the static debug info array used during recompilation. 1920 * @returns Pointer to the new debug info block, NULL on failure. 1921 */ 1922 DECL_NO_INLINE(static, PIEMTBDBG) iemNativeDbgInfoGrow(PIEMRECOMPILERSTATE pReNative, PIEMTBDBG pDbgInfo) RT_NOEXCEPT 1922 * 1923 * @returns Pointer to the new debug info block; throws VBox status code on 1924 * failure, so no need to check the return value. 1925 */ 1926 DECL_NO_INLINE(static, PIEMTBDBG) iemNativeDbgInfoGrow(PIEMRECOMPILERSTATE pReNative, PIEMTBDBG pDbgInfo) 1923 1927 { 1924 1928 uint32_t cNew = pReNative->cDbgInfoAlloc * 2; 1925 Assert Return(cNew < _1M && cNew != 0, NULL);1929 AssertStmt(cNew < _1M && cNew != 0, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_DBGINFO_IPE_1)); 1926 1930 pDbgInfo = (PIEMTBDBG)RTMemRealloc(pDbgInfo, RT_UOFFSETOF_DYN(IEMTBDBG, aEntries[cNew])); 1927 Assert Return(pDbgInfo, NULL);1931 AssertStmt(pDbgInfo, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_DBGINFO_OUT_OF_MEMORY)); 1928 1932 pReNative->pDbgInfo = pDbgInfo; 1929 1933 pReNative->cDbgInfoAlloc = cNew; … … 1935 1939 * Adds a new debug info uninitialized entry, returning the pointer to it. 1936 1940 */ 1937 DECL INLINE(PIEMTBDBGENTRY) iemNativeDbgInfoAddNewEntry(PIEMRECOMPILERSTATE pReNative, PIEMTBDBG pDbgInfo)1941 DECL_INLINE_THROW(PIEMTBDBGENTRY) iemNativeDbgInfoAddNewEntry(PIEMRECOMPILERSTATE pReNative, PIEMTBDBG pDbgInfo) 1938 1942 { 1939 1943 if (RT_LIKELY(pDbgInfo->cEntries < pReNative->cDbgInfoAlloc)) 1940 1944 { /* likely */ } 1941 1945 else 1942 {1943 1946 pDbgInfo = iemNativeDbgInfoGrow(pReNative, pDbgInfo); 1944 AssertReturn(pDbgInfo, NULL);1945 }1946 1947 return &pDbgInfo->aEntries[pDbgInfo->cEntries++]; 1947 1948 } … … 1951 1952 * Debug Info: Adds a native offset record, if necessary. 1952 1953 */ 1953 static bool iemNativeDbgInfoAddNativeOffset(PIEMRECOMPILERSTATE pReNative, uint32_t off) RT_NOEXCEPT1954 static void iemNativeDbgInfoAddNativeOffset(PIEMRECOMPILERSTATE pReNative, uint32_t off) 1954 1955 { 1955 1956 PIEMTBDBG pDbgInfo = pReNative->pDbgInfo; … … 1964 1965 { 1965 1966 if (pDbgInfo->aEntries[idx].NativeOffset.offNative == off) 1966 return true; 1967 AssertReturn(pDbgInfo->aEntries[idx].NativeOffset.offNative < off, false); 1967 return; 1968 AssertStmt(pDbgInfo->aEntries[idx].NativeOffset.offNative < off, 1969 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_DBGINFO_IPE_2)); 1968 1970 break; 1969 1971 } … … 1973 1975 */ 1974 1976 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pDbgInfo); 1975 AssertReturn(pEntry, false);1976 1977 pEntry->NativeOffset.uType = kIemTbDbgEntryType_NativeOffset; 1977 1978 pEntry->NativeOffset.offNative = off; 1978 1979 return true;1980 1979 } 1981 1980 … … 1984 1983 * Debug Info: Record info about a label. 
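A minimal sketch of how the label and fixup helpers above combine under the new throwing contract. It assumes the surrounding VBox recompiler headers; the wrapping helper is hypothetical and the particular label/fixup type constants are picked from this diff purely for illustration.

/* Hypothetical emitter fragment; only the iemNative* calls are the real API. */
uint32_t emitJumpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    /* Create an as-yet-undefined label; throws VERR_IEM_LABEL_* via longjmp on failure. */
    uint32_t const idxLabel = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Return);

    /* Record that the branch we are about to emit must be patched to point at the label. */
    iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5);
    /* ...emit the branch instruction itself here, bumping 'off'... */

    /* Later, once code generation reaches the target, pin the label down. */
    iemNativeLabelDefine(pReNative, idxLabel, off);
    return off;
}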
1985 1984 */ 1986 static bool iemNativeDbgInfoAddLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, uint16_t uData) RT_NOEXCEPT1985 static void iemNativeDbgInfoAddLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, uint16_t uData) 1987 1986 { 1988 1987 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo); 1989 AssertReturn(pEntry, false);1990 1991 1988 pEntry->Label.uType = kIemTbDbgEntryType_Label; 1992 1989 pEntry->Label.uUnused = 0; 1993 1990 pEntry->Label.enmLabel = (uint8_t)enmType; 1994 1991 pEntry->Label.uData = uData; 1995 1996 return true;1997 1992 } 1998 1993 … … 2001 1996 * Debug Info: Record info about a threaded call. 2002 1997 */ 2003 static bool iemNativeDbgInfoAddThreadedCall(PIEMRECOMPILERSTATE pReNative, IEMTHREADEDFUNCS enmCall, bool fRecompiled) RT_NOEXCEPT1998 static void iemNativeDbgInfoAddThreadedCall(PIEMRECOMPILERSTATE pReNative, IEMTHREADEDFUNCS enmCall, bool fRecompiled) 2004 1999 { 2005 2000 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo); 2006 AssertReturn(pEntry, false);2007 2008 2001 pEntry->ThreadedCall.uType = kIemTbDbgEntryType_ThreadedCall; 2009 2002 pEntry->ThreadedCall.fRecompiled = fRecompiled; 2010 2003 pEntry->ThreadedCall.uUnused = 0; 2011 2004 pEntry->ThreadedCall.enmCall = (uint16_t)enmCall; 2012 2013 return true;2014 2005 } 2015 2006 … … 2018 2009 * Debug Info: Record info about a new guest instruction. 2019 2010 */ 2020 static bool iemNativeDbgInfoAddGuestInstruction(PIEMRECOMPILERSTATE pReNative, uint32_t fExec) RT_NOEXCEPT2011 static void iemNativeDbgInfoAddGuestInstruction(PIEMRECOMPILERSTATE pReNative, uint32_t fExec) 2021 2012 { 2022 2013 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo); 2023 AssertReturn(pEntry, false);2024 2025 2014 pEntry->GuestInstruction.uType = kIemTbDbgEntryType_GuestInstruction; 2026 2015 pEntry->GuestInstruction.uUnused = 0; 2027 2016 pEntry->GuestInstruction.fExec = fExec; 2028 2029 return true;2030 2017 } 2031 2018 … … 2034 2021 * Debug Info: Record info about guest register shadowing. 2035 2022 */ 2036 static booliemNativeDbgInfoAddGuestRegShadowing(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg,2037 uint8_t idxHstReg = UINT8_MAX, uint8_t idxHstRegPrev = UINT8_MAX) RT_NOEXCEPT2023 static void iemNativeDbgInfoAddGuestRegShadowing(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg, 2024 uint8_t idxHstReg = UINT8_MAX, uint8_t idxHstRegPrev = UINT8_MAX) 2038 2025 { 2039 2026 PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo); 2040 AssertReturn(pEntry, false);2041 2042 2027 pEntry->GuestRegShadowing.uType = kIemTbDbgEntryType_GuestRegShadowing; 2043 2028 pEntry->GuestRegShadowing.uUnused = 0; … … 2045 2030 pEntry->GuestRegShadowing.idxHstReg = idxHstReg; 2046 2031 pEntry->GuestRegShadowing.idxHstRegPrev = idxHstRegPrev; 2047 2048 return true;2049 2032 } 2050 2033 … … 2203 2186 * This ASSUMES the caller has done the minimal/optimal allocation checks and 2204 2187 * failed. 2205 */ 2206 static uint8_t iemNativeRegAllocFindFree(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fAllowVolatile) RT_NOEXCEPT 2188 * 2189 * @returns Host register number on success; throws VBox status code on failure, so no 2190 * need to check the return value. 
2191 */ 2192 static uint8_t iemNativeRegAllocFindFree(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fAllowVolatile) 2207 2193 { 2208 2194 uint32_t fRegMask = fAllowVolatile … … 2257 2243 if (pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Stack) 2258 2244 { 2259 AssertReturn(pReNative->Core.aVars[idxVar].idxStackSlot != UINT8_MAX, UINT8_MAX); 2260 uint32_t off = *poff; 2261 *poff = off = iemNativeEmitStoreGprByBp(pReNative, off, 2262 pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t) 2263 - IEMNATIVE_FP_OFF_STACK_VARS, 2264 idxReg); 2265 AssertReturn(off != UINT32_MAX, UINT8_MAX); 2245 AssertStmt(pReNative->Core.aVars[idxVar].idxStackSlot != UINT8_MAX, 2246 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_8)); 2247 *poff = iemNativeEmitStoreGprByBp(pReNative, *poff, 2248 pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t) 2249 - IEMNATIVE_FP_OFF_STACK_VARS, 2250 idxReg); 2266 2251 } 2267 2252 … … 2276 2261 } 2277 2262 2278 AssertFailed Return(UINT8_MAX);2263 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_OUT_OF_REGISTERS)); 2279 2264 } 2280 2265 … … 2352 2337 else 2353 2338 { 2354 Assert Return(pReNative->Core.aVars[idxVar].idxStackSlot != UINT8_MAX, UINT32_MAX);2339 AssertStmt(pReNative->Core.aVars[idxVar].idxStackSlot != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_7)); 2355 2340 off = iemNativeEmitStoreGprByBp(pReNative, off, 2356 2341 pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t) 2357 2342 - IEMNATIVE_FP_OFF_STACK_VARS, 2358 2343 idxRegOld); 2359 AssertReturn(off != UINT32_MAX, UINT32_MAX);2360 2344 2361 2345 pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxRegOld); … … 2375 2359 * up a register. 2376 2360 * 2377 * @returns The host register number, UINT8_MAX on failure. 2361 * @returns The host register number; throws VBox status code on failure, 2362 * so no need to check the return value. 2378 2363 * @param pReNative The native recompile state. 2379 2364 * @param poff Pointer to the variable with the code buffer position. … … 2384 2369 * (@c false, for iemNativeRegAllocTmpForGuestReg()). 2385 2370 */ 2386 DECLHIDDEN(uint8_t) iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, 2387 bool fPreferVolatile /*= true*/) RT_NOEXCEPT 2371 DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile /*= true*/) 2388 2372 { 2389 2373 /* … … 2408 2392 { 2409 2393 idxReg = iemNativeRegAllocFindFree(pReNative, poff, true /*fAllowVolatile*/); 2410 Assert Return(idxReg != UINT8_MAX, UINT8_MAX);2394 AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP)); 2411 2395 } 2412 2396 return iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Tmp); … … 2423 2407 * read-only. Free using iemNativeRegFreeTmpImm. 2424 2408 * 2425 * @returns The host register number, UINT8_MAX on failure. 2409 * @returns The host register number; throws VBox status code on failure, so no 2410 * need to check the return value. 2426 2411 * @param pReNative The native recompile state. 2427 2412 * @param poff Pointer to the variable with the code buffer position. … … 2434 2419 * @note Reusing immediate values has not been implemented yet. 
…
   * read-only. Free using iemNativeRegFreeTmpImm.
   *
-  * @returns The host register number, UINT8_MAX on failure.
+  * @returns The host register number; throws VBox status code on failure, so no
+  *          need to check the return value.
   * @param   pReNative       The native recompile state.
   * @param   poff            Pointer to the variable with the code buffer position.
…
   * @note    Reusing immediate values has not been implemented yet.
   */
- DECLHIDDEN(uint8_t) iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
-                                             bool fPreferVolatile /*= true*/) RT_NOEXCEPT
- {
-     uint8_t idxReg = iemNativeRegAllocTmp(pReNative, poff, fPreferVolatile);
-     if (idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
-     {
-         uint32_t off = *poff;
-         *poff = off = iemNativeEmitLoadGprImm64(pReNative, off, idxReg, uImm);
-         AssertReturnStmt(off != UINT32_MAX, iemNativeRegFreeTmp(pReNative, idxReg), UINT8_MAX);
-     }
+ DECL_HIDDEN_THROW(uint8_t)
+ iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm, bool fPreferVolatile /*= true*/)
+ {
+     uint8_t const idxReg = iemNativeRegAllocTmp(pReNative, poff, fPreferVolatile);
+     *poff = iemNativeEmitLoadGprImm64(pReNative, *poff, idxReg, uImm);
      return idxReg;
  }
…
   * be emitted if we have to free up a register to satify the request.
   *
-  * @returns The host register number, UINT8_MAX on failure.
+  * @returns The host register number; throws VBox status code on failure, so no
+  *          need to check the return value.
   * @param   pReNative       The native recompile state.
   * @param   poff            Pointer to the variable with the code buffer
…
   * @sa      iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
   */
- DECLHIDDEN(uint8_t) iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
-                                                     IEMNATIVEGSTREG enmGstReg, IEMNATIVEGSTREGUSE enmIntendedUse) RT_NOEXCEPT
+ DECL_HIDDEN_THROW(uint8_t)
+ iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
+                                 IEMNATIVEGSTREG enmGstReg, IEMNATIVEGSTREGUSE enmIntendedUse)
  {
      Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
…
          {
              uint8_t const idxRegNew = iemNativeRegAllocTmp(pReNative, poff);
-             Assert(idxRegNew < RT_ELEMENTS(pReNative->Core.aHstRegs));
-
-             uint32_t off = *poff;
-             *poff = off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegNew, idxReg);
-             AssertReturn(off != UINT32_MAX, UINT8_MAX);
+
+             *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);

              Log12(("iemNativeRegAllocTmpForGuestReg: Duplicated %s for guest %s into %s for destructive calc\n",
…
              /** @todo share register for readonly access. */
              uint8_t const idxRegNew = iemNativeRegAllocTmp(pReNative, poff, enmIntendedUse == kIemNativeGstRegUse_Calculation);
-             AssertReturn(idxRegNew < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT8_MAX);
-
-             uint32_t off = *poff;
-             *poff = off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegNew, idxReg);
-             AssertReturn(off != UINT32_MAX, UINT8_MAX);
+
+             *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);

              if (enmIntendedUse != kIemNativeGstRegUse_ForUpdate)
…
  #ifdef VBOX_STRICT
          /* Strict builds: Check that the value is correct. */
-         uint32_t off = *poff;
-         *poff = off = iemNativeEmitGuestRegValueCheck(pReNative, off, idxReg, enmGstReg);
-         AssertReturn(off != UINT32_MAX, UINT8_MAX);
+         *poff = iemNativeEmitGuestRegValueCheck(pReNative, *poff, idxReg, enmGstReg);
  #endif
…
       */
      uint8_t const idxRegNew = iemNativeRegAllocTmp(pReNative, poff, enmIntendedUse == kIemNativeGstRegUse_Calculation);
-     AssertReturn(idxRegNew < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT8_MAX);
-
-     uint32_t off = *poff;
-     *poff = off = iemNativeEmitLoadGprWithGstShadowReg(pReNative, off, idxRegNew, enmGstReg);
-     AssertReturn(off != UINT32_MAX, UINT8_MAX);
+
+     *poff = iemNativeEmitLoadGprWithGstShadowReg(pReNative, *poff, idxRegNew, enmGstReg);

      if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
-         iemNativeRegMarkAsGstRegShadow(pReNative, idxRegNew, enmGstReg, off);
+         iemNativeRegMarkAsGstRegShadow(pReNative, idxRegNew, enmGstReg, *poff);
      Log12(("iemNativeRegAllocTmpForGuestReg: Allocated %s for guest %s %s\n",
             g_apszIemNativeHstRegNames[idxRegNew], g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[enmIntendedUse]));
…
   * The intended use can only be readonly!
   *
-  * @returns The host register number, UINT8_MAX on failure.
+  * @returns The host register number, UINT8_MAX if not present.
   * @param   pReNative   The native recompile state.
   * @param   poff        Pointer to the instruction buffer offset.
…
   *                      found.
   * @param   enmGstReg   The guest register that will is to be updated.
+  * @note    In strict builds, this may throw instruction buffer growth failures.
+  *          Non-strict builds will not throw anything.
   * @sa      iemNativeRegAllocTmpForGuestReg
   */
- DECLHIDDEN(uint8_t) iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
-                                                                     IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT
+ DECL_HIDDEN_THROW(uint8_t)
+ iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
  {
      Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
…
  #ifdef VBOX_STRICT
              /* Strict builds: Check that the value is correct. */
-             uint32_t off = *poff;
-             *poff = off = iemNativeEmitGuestRegValueCheck(pReNative, off, idxReg, enmGstReg);
-             AssertReturn(off != UINT32_MAX, UINT8_MAX);
+             *poff = iemNativeEmitGuestRegValueCheck(pReNative, *poff, idxReg, enmGstReg);
  #else
              RT_NOREF(poff);
…
- DECLHIDDEN(uint8_t) iemNativeRegAllocVar(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t idxVar) RT_NOEXCEPT;
+ DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocVar(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t idxVar);
…
  /**
   * Allocates argument registers for a function call.
   *
-  * @returns New code buffer offset on success, UINT32_MAX on failure.
+  * @returns New code buffer offset on success; throws VBox status code on failure, so no
+  *          need to check the return value.
   * @param   pReNative   The native recompile state.
   * @param   off         The current code buffer offset.
   * @param   cArgs       The number of arguments the function call takes.
   */
- DECLHIDDEN(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs) RT_NOEXCEPT
- {
-     AssertReturn(cArgs <= IEMNATIVE_CALL_ARG_GREG_COUNT + IEMNATIVE_FRAME_STACK_ARG_COUNT, false);
+ DECL_HIDDEN_THROW(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs)
+ {
+     AssertStmt(cArgs <= IEMNATIVE_CALL_ARG_GREG_COUNT + IEMNATIVE_FRAME_STACK_ARG_COUNT,
+                IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_4));
      Assert(RT_ELEMENTS(g_aidxIemNativeCallRegs) == IEMNATIVE_CALL_ARG_GREG_COUNT);
      Assert(RT_ELEMENTS(g_afIemNativeCallRegs) == IEMNATIVE_CALL_ARG_GREG_COUNT);
…
              {
                  uint8_t const idxVar = pReNative->Core.aHstRegs[idxReg].idxVar;
-                 AssertReturn(idxVar < RT_ELEMENTS(pReNative->Core.aVars), false);
+                 AssertStmt(idxVar < RT_ELEMENTS(pReNative->Core.aVars),
+                            IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_5));
                  Assert(pReNative->Core.aVars[idxVar].idxReg == idxReg);
                  Assert(pReNative->Core.bmVars & RT_BIT_32(idxVar));
…
                  {
                      off = iemNativeRegMoveOrSpillStackVar(pReNative, off, idxVar);
-                     AssertReturn(off != UINT32_MAX, false);
                      Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
                  }
…
                  case kIemNativeWhat_Arg:
                  case kIemNativeWhat_rc:
-                     AssertFailedReturn(false);
+                     AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_5));
                  default:
-                     AssertFailedReturn(false);
+                     AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_6));
              }
…
- DECLHIDDEN(uint8_t) iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
+ DECL_HIDDEN_THROW(uint8_t) iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg);
…
   * @param   fFreeArgVars    Whether to free argument variables for the call.
   */
- DECLHIDDEN(uint32_t) iemNativeRegMoveAndFreeAndFlushAtCall(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                                            uint8_t cArgs, bool fFreeArgVars) RT_NOEXCEPT
+ DECL_HIDDEN_THROW(uint32_t)
+ iemNativeRegMoveAndFreeAndFlushAtCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs, bool fFreeArgVars)
  {
      /*
       * Free argument variables first (simplified).
       */
-     AssertReturn(cArgs <= RT_ELEMENTS(pReNative->Core.aidxArgVars), UINT32_MAX);
+     AssertStmt(cArgs <= RT_ELEMENTS(pReNative->Core.aidxArgVars), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_6));
      if (fFreeArgVars && cArgs > 0)
      {
…
                      pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
                  else
-                 {
                      off = iemNativeRegMoveOrSpillStackVar(pReNative, off, idxVar);
-                     AssertReturn(off != UINT32_MAX, UINT32_MAX);
-                 }
                  continue;
              }
…
              case kIemNativeWhat_Invalid:
              case kIemNativeWhat_End:
-                 AssertFailedReturn(UINT32_MAX);
+                 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_1));
          }
-         AssertFailedReturn(UINT32_MAX);
+         AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_2));
      }
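The net effect on call sites is shown below as an illustrative before/after fragment. The two wrapping functions are hypothetical; the calls inside them are the real helpers appearing in this diff.

/* Old, status-code style: every helper result had to be checked. */
uint32_t emitOld(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    uint8_t const idxReg = iemNativeRegAllocTmp(pReNative, &off);
    AssertReturn(idxReg != UINT8_MAX, UINT32_MAX);
    off = iemNativeEmitLoadGprImm64(pReNative, off, idxReg, 0);
    AssertReturn(off != UINT32_MAX, UINT32_MAX);
    iemNativeRegFreeTmp(pReNative, idxReg);
    return off;
}

/* New, throwing style: failures longjmp out, so the happy path reads straight. */
uint32_t emitNew(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    uint8_t const idxReg = iemNativeRegAllocTmp(pReNative, &off);  /* throws VERR_IEM_REG_* */
    off = iemNativeEmitLoadGprImm64(pReNative, off, idxReg, 0);    /* throws on buffer growth failure */
    iemNativeRegFreeTmp(pReNative, idxReg);                        /* release the temporary register */
    return off;
}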
3080 3050 */ 3081 DECL HIDDEN(uint32_t) iemNativeRegFlushPendingWrites(PIEMRECOMPILERSTATE pReNative, uint32_t off) RT_NOEXCEPT3051 DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushPendingWrites(PIEMRECOMPILERSTATE pReNative, uint32_t off) 3082 3052 { 3083 3053 RT_NOREF(pReNative, off); … … 3103 3073 * that is something the caller needs to do if applicable. 3104 3074 */ 3105 DECL HIDDEN(uint32_t) iemNativeEmitLoadGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off,3106 uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT 3075 DECL_HIDDEN_THROW(uint32_t) 3076 iemNativeEmitLoadGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg) 3107 3077 { 3108 3078 Assert((unsigned)enmGstReg < RT_ELEMENTS(g_aGstShadowInfo)); … … 3122 3092 #endif 3123 3093 default: 3124 AssertFailed Return(UINT32_MAX);3094 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IPE_NOT_REACHED_DEFAULT_CASE)); 3125 3095 } 3126 3096 } … … 3136 3106 * Trashes EFLAGS on AMD64. 3137 3107 */ 3138 static uint32_t iemNativeEmitGuestRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off,3139 uint8_t idxReg, IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT 3108 static uint32_t 3109 iemNativeEmitGuestRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxReg, IEMNATIVEGSTREG enmGstReg) 3140 3110 { 3141 3111 # ifdef RT_ARCH_AMD64 3142 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32); 3143 AssertReturn(pbCodeBuf, UINT32_MAX); 3112 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32); 3144 3113 3145 3114 /* cmp reg, [mem] */ … … 3159 3128 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP; 3160 3129 else 3161 AssertReturn(g_aGstShadowInfo[enmGstReg].cb == sizeof(uint32_t), UINT32_MAX); 3130 AssertStmt(g_aGstShadowInfo[enmGstReg].cb == sizeof(uint32_t), 3131 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_LABEL_IPE_7)); 3162 3132 if (idxReg >= 8) 3163 3133 pbCodeBuf[off++] = X86_OP_REX_R; … … 3230 3200 off = iemNativeEmitLoadGprWithGstShadowReg(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, enmGstReg); 3231 3201 3232 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 3233 AssertReturn(pu32CodeBuf, UINT32_MAX); 3202 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 3234 3203 /* sub tmp0, tmp0, idxReg */ 3235 3204 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, IEMNATIVE_REG_FIXED_TMP0, IEMNATIVE_REG_FIXED_TMP0, idxReg); … … 3253 3222 * from the code if either are non-zero. 
3254 3223 */ 3255 DECL HIDDEN(uint32_t) iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off,3256 uint8_t idxInstr) RT_NOEXCEPT 3224 DECL_HIDDEN_THROW(uint32_t) 3225 iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr) 3257 3226 { 3258 3227 #ifdef RT_ARCH_AMD64 … … 3269 3238 /* edx = eax | rcPassUp */ 3270 3239 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2); 3271 AssertReturn(pbCodeBuf, UINT32_MAX);3272 3240 pbCodeBuf[off++] = 0x0b; /* or edx, eax */ 3273 3241 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xDX, X86_GREG_xAX); … … 3287 3255 3288 3256 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 3289 AssertReturn(pu32CodeBuf, UINT32_MAX);3290 3257 3291 3258 pu32CodeBuf[off++] = Armv8A64MkInstrOrr(ARMV8_A64_REG_X4, ARMV8_A64_REG_X3, ARMV8_A64_REG_X0, false /*f64Bit*/); 3292 3259 3293 3260 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp); 3294 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX); 3295 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5), UINT32_MAX); 3261 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5); 3296 3262 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(true /*fJmpIfNotZero*/, ARMV8_A64_REG_X4, false /*f64Bit*/); 3297 3263 … … 3314 3280 * @param idxInstr The current instruction. 3315 3281 */ 3316 DECL HIDDEN(uint32_t) iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off,3317 3282 DECL_HIDDEN_THROW(uint32_t) 3283 iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxAddrReg, uint8_t idxInstr) 3318 3284 { 3319 3285 RT_NOREF(idxInstr); … … 3332 3298 */ 3333 3299 uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off); 3334 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);3335 3300 3336 3301 off = iemNativeEmitLoadGprFromGpr(pReNative, off, iTmpReg, idxAddrReg); … … 3368 3333 */ 3369 3334 uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off); 3370 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);3371 3335 3372 3336 off = iemNativeEmitLoadGprImm64(pReNative, off, iTmpReg, UINT64_C(0x800000000000)); … … 3406 3370 * @param idxInstr The current instruction. 
3407 3371 */ 3408 DECLHIDDEN(uint32_t) iemNativeEmitCheckGpr32AgainstSegLimitMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off, 3409 uint8_t idxAddrReg, uint8_t idxSegReg, uint8_t idxInstr) 3372 DECL_HIDDEN_THROW(uint32_t) 3373 iemNativeEmitCheckGpr32AgainstSegLimitMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off, 3374 uint8_t idxAddrReg, uint8_t idxSegReg, uint8_t idxInstr) 3410 3375 { 3411 3376 /* … … 3416 3381 3417 3382 /** @todo implement expand down/whatnot checking */ 3418 Assert Return(idxSegReg == X86_SREG_CS, UINT32_MAX);3383 AssertStmt(idxSegReg == X86_SREG_CS, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_EMIT_CASE_NOT_IMPLEMENTED_1)); 3419 3384 3420 3385 uint8_t const iTmpLimReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, 3421 3386 (IEMNATIVEGSTREG)(kIemNativeGstReg_SegLimitFirst + idxSegReg), 3422 3387 kIemNativeGstRegUse_ForUpdate); 3423 AssertReturn(iTmpLimReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);3424 3388 3425 3389 off = iemNativeEmitCmpGpr32WithGpr(pReNative, off, idxAddrReg, iTmpLimReg); … … 3481 3445 # endif 3482 3446 #endif 3483 AssertReturn(off != UINT32_MAX, off);3484 3447 3485 3448 /* … … 3568 3531 */ 3569 3532 off = iemNativeEmitCheckCallRetAndPassUp(pReNative, off, pCallEntry->idxInstr); 3570 AssertReturn(off != UINT32_MAX, off);3571 3533 3572 3534 return off; … … 3673 3635 */ 3674 3636 off = iemNativeEmitGprZero(pReNative,off, IEMNATIVE_CALL_RET_GREG); 3675 AssertReturn(off != UINT32_MAX, UINT32_MAX);3676 3637 3677 3638 /* … … 3679 3640 */ 3680 3641 uint32_t const idxReturn = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Return, off); 3681 AssertReturn(idxReturn != UINT32_MAX, UINT32_MAX);3682 3642 *pidxReturnLabel = idxReturn; 3683 3643 … … 3686 3646 */ 3687 3647 #ifdef RT_ARCH_AMD64 3688 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20); 3689 AssertReturn(pbCodeBuf, UINT32_MAX); 3648 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20); 3690 3649 3691 3650 /* Reposition esp at the r15 restore point. */ … … 3714 3673 3715 3674 #elif RT_ARCH_ARM64 3716 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10); 3717 AssertReturn(pu32CodeBuf, UINT32_MAX); 3675 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10); 3718 3676 3719 3677 /* ldp x19, x20, [sp #IEMNATIVE_FRAME_VAR_SIZE]! ; Unallocate the variable space and restore x19+x20. */ … … 3771 3729 * unwind description for all the code. 3772 3730 */ 3773 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32); 3774 AssertReturn(pbCodeBuf, UINT32_MAX); 3731 uint8_t *const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32); 3775 3732 pbCodeBuf[off++] = 0x50 + X86_GREG_xBP; /* push rbp */ 3776 3733 pbCodeBuf[off++] = X86_OP_REX_W; /* mov rbp, rsp */ … … 3814 3771 * return address our selves here. We save all non-volatile registers. 3815 3772 */ 3816 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10); 3817 AssertReturn(pu32CodeBuf, UINT32_MAX); 3773 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10); 3818 3774 3819 3775 # ifdef RT_OS_DARWIN /** @todo This seems to be requirement by libunwind for JIT FDEs. 
Investigate further as been unable … … 3888 3844 return iemNativeEmitCImplCall1(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0) 3889 3845 3890 DECL INLINE(uint32_t) iemNativeEmitCImplCall1(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,3891 uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0)3846 DECL_INLINE_THROW(uint32_t) iemNativeEmitCImplCall1(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr, 3847 uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0) 3892 3848 { 3893 3849 return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 1, uArg0, 0, 0); … … 3898 3854 return iemNativeEmitCImplCall2(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1) 3899 3855 3900 DECL INLINE(uint32_t) iemNativeEmitCImplCall2(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,3901 uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1)3856 DECL_INLINE_THROW(uint32_t) iemNativeEmitCImplCall2(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr, 3857 uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1) 3902 3858 { 3903 3859 return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 2, uArg0, uArg1, 0); … … 3908 3864 return iemNativeEmitCImplCall3(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1, a2) 3909 3865 3910 DECLINLINE(uint32_t) iemNativeEmitCImplCall3(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr, 3911 uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1, uint64_t uArg2) 3866 DECL_INLINE_THROW(uint32_t) iemNativeEmitCImplCall3(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr, 3867 uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1, 3868 uint64_t uArg2) 3912 3869 { 3913 3870 return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 3, uArg0, uArg1, uArg2); … … 3920 3877 3921 3878 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(a_cbInstr) \ 3922 off = iemNativeEmitAddToRip64AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \ 3923 AssertReturn(off != UINT32_MAX, UINT32_MAX) 3879 off = iemNativeEmitAddToRip64AndFinishingNoFlags(pReNative, off, (a_cbInstr)) 3924 3880 3925 3881 /** Same as iemRegAddToRip64AndFinishingNoFlags. */ 3926 DECLINLINE(uint32_t) iemNativeEmitAddToRip64AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr) 3882 DECL_INLINE_THROW(uint32_t) 3883 iemNativeEmitAddToRip64AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr) 3927 3884 { 3928 3885 /* Allocate a temporary PC register. */ 3929 3886 uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate); 3930 AssertReturn(idxPcReg != UINT8_MAX, UINT32_MAX);3931 3887 3932 3888 /* Perform the addition and store the result. */ … … 3942 3898 3943 3899 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(a_cbInstr) \ 3944 off = iemNativeEmitAddToEip32AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \ 3945 AssertReturn(off != UINT32_MAX, UINT32_MAX) 3900 off = iemNativeEmitAddToEip32AndFinishingNoFlags(pReNative, off, (a_cbInstr)) 3946 3901 3947 3902 /** Same as iemRegAddToEip32AndFinishingNoFlags. 
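After this hunk, a use of IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64 inside a recompiled-function body reduces to a single straight-line statement. An illustrative expansion, assuming the usual pReNative/off locals of an emitter body:

/* Expansion of IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(cbInstr): */
off = iemNativeEmitAddToRip64AndFinishingNoFlags(pReNative, off, (cbInstr));
/* The old trailing AssertReturn(off != UINT32_MAX, UINT32_MAX) is gone: a
   failure now longjmps back to the recompiler entry point instead of
   propagating UINT32_MAX through every macro. */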
  #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(a_cbInstr) \
-     off = iemNativeEmitAddToEip32AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+     off = iemNativeEmitAddToEip32AndFinishingNoFlags(pReNative, off, (a_cbInstr))

  /** Same as iemRegAddToEip32AndFinishingNoFlags. */
- DECLINLINE(uint32_t) iemNativeEmitAddToEip32AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
+ DECL_INLINE_THROW(uint32_t)
+ iemNativeEmitAddToEip32AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
  {
      /* Allocate a temporary PC register. */
      uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
-     AssertReturn(idxPcReg != UINT8_MAX, UINT32_MAX);

      /* Perform the addition and store the result. */
…
  #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr) \
-     off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+     off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr))

  /** Same as iemRegAddToIp16AndFinishingNoFlags. */
- DECLINLINE(uint32_t) iemNativeEmitAddToIp16AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
+ DECL_INLINE_THROW(uint32_t)
+ iemNativeEmitAddToIp16AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
  {
      /* Allocate a temporary PC register. */
      uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
-     AssertReturn(idxPcReg != UINT8_MAX, UINT32_MAX);

      /* Perform the addition and store the result. */
…
  #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize) \
      off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
-                                                             (a_enmEffOpSize), pCallEntry->idxInstr); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+                                                             (a_enmEffOpSize), pCallEntry->idxInstr)

  #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr) \
      off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
-                                                             IEMMODE_16BIT, pCallEntry->idxInstr); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+                                                             IEMMODE_16BIT, pCallEntry->idxInstr)

  #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr) \
      off = iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \
-                                                             IEMMODE_64BIT, pCallEntry->idxInstr); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+                                                             IEMMODE_64BIT, pCallEntry->idxInstr)

  /** Same as iemRegRip64RelativeJumpS8AndFinishNoFlags,
   *  iemRegRip64RelativeJumpS16AndFinishNoFlags and
   *  iemRegRip64RelativeJumpS32AndFinishNoFlags. */
- DECLINLINE(uint32_t) iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                                                        uint8_t cbInstr, int32_t offDisp, IEMMODE enmEffOpSize,
-                                                                        uint8_t idxInstr)
+ DECL_INLINE_THROW(uint32_t)
+ iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr,
+                                                   int32_t offDisp, IEMMODE enmEffOpSize, uint8_t idxInstr)
  {
      Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
…
      /* Allocate a temporary PC register. */
      uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
-     AssertReturn(idxPcReg != UINT8_MAX, UINT32_MAX);

      /* Perform the addition. */
…
  #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize) \
      off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), \
-                                                             (a_enmEffOpSize), pCallEntry->idxInstr); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+                                                             (a_enmEffOpSize), pCallEntry->idxInstr)

  #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr) \
      off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), \
-                                                             IEMMODE_16BIT, pCallEntry->idxInstr); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+                                                             IEMMODE_16BIT, pCallEntry->idxInstr)

  #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr) \
      off = iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), \
-                                                             IEMMODE_32BIT, pCallEntry->idxInstr); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+                                                             IEMMODE_32BIT, pCallEntry->idxInstr)

  /** Same as iemRegEip32RelativeJumpS8AndFinishNoFlags,
   *  iemRegEip32RelativeJumpS16AndFinishNoFlags and
   *  iemRegEip32RelativeJumpS32AndFinishNoFlags. */
- DECLINLINE(uint32_t) iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                                                        uint8_t cbInstr, int32_t offDisp, IEMMODE enmEffOpSize,
-                                                                        uint8_t idxInstr)
+ DECL_INLINE_THROW(uint32_t)
+ iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr,
+                                                   int32_t offDisp, IEMMODE enmEffOpSize, uint8_t idxInstr)
  {
      Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
…
      /* Allocate a temporary PC register. */
      uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
-     AssertReturn(idxPcReg != UINT8_MAX, UINT32_MAX);

      /* Perform the addition. */
…
  #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr) \
-     off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), pCallEntry->idxInstr); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+     off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), pCallEntry->idxInstr)

  #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr) \
-     off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), pCallEntry->idxInstr); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+     off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), pCallEntry->idxInstr)

  #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr) \
-     off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), pCallEntry->idxInstr); \
-     AssertReturn(off != UINT32_MAX, UINT32_MAX)
+     off = iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), pCallEntry->idxInstr)

  /** Same as iemRegIp16RelativeJumpS8AndFinishNoFlags. */
- DECLINLINE(uint32_t) iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                                                       uint8_t cbInstr, int32_t offDisp, uint8_t idxInstr)
+ DECL_INLINE_THROW(uint32_t)
+ iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                  uint8_t cbInstr, int32_t offDisp, uint8_t idxInstr)
  {
      /* We speculatively modify PC and may raise #GP(0), so make sure the right value is in CPUMCTX. */
…
      /* Allocate a temporary PC register. */
      uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
-     AssertReturn(idxPcReg != UINT8_MAX, UINT32_MAX);

      /* Perform the addition, clamp the result, check limit (may #GP(0) + exit TB) and store the result. */
…
   *          (too many nestings)
   */
- DECLINLINE(PIEMNATIVECOND) iemNativeCondPushIf(PIEMRECOMPILERSTATE pReNative)
+ DECL_INLINE_THROW(PIEMNATIVECOND) iemNativeCondPushIf(PIEMRECOMPILERSTATE pReNative)
  {
      uint32_t const idxStack = pReNative->cCondDepth;
-     AssertReturn(idxStack < RT_ELEMENTS(pReNative->aCondStack), NULL);
+     AssertStmt(idxStack < RT_ELEMENTS(pReNative->aCondStack), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_COND_TOO_DEEPLY_NESTED));

      PIEMNATIVECOND const pEntry = &pReNative->aCondStack[idxStack];
…
      pEntry->fInElse       = false;
      pEntry->idxLabelElse  = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Else, UINT32_MAX /*offWhere*/, uCondSeqNo);
-     AssertReturn(pEntry->idxLabelElse != UINT32_MAX, NULL);
      pEntry->idxLabelEndIf = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Endif, UINT32_MAX /*offWhere*/, uCondSeqNo);
-     AssertReturn(pEntry->idxLabelEndIf != UINT32_MAX, NULL);

      return pEntry;
…
  /**
   * Start of the if-block, snapshotting the register and variable state.
   */
- DECLINLINE(void) iemNativeCondStartIfBlock(PIEMRECOMPILERSTATE pReNative, uint32_t offIfBlock, uint32_t idxLabelIf = UINT32_MAX)
+ DECL_INLINE_THROW(void)
+ iemNativeCondStartIfBlock(PIEMRECOMPILERSTATE pReNative, uint32_t offIfBlock, uint32_t idxLabelIf = UINT32_MAX)
  {
      Assert(offIfBlock != UINT32_MAX);
…
  #define IEM_MC_ELSE() } while (0); \
          off = iemNativeEmitElse(pReNative, off); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  /** Emits code related to IEM_MC_ELSE. */
- DECLINLINE(uint32_t) iemNativeEmitElse(PIEMRECOMPILERSTATE pReNative, uint32_t off)
+ DECL_INLINE_THROW(uint32_t) iemNativeEmitElse(PIEMRECOMPILERSTATE pReNative, uint32_t off)
  {
      /* Check sanity and get the conditional stack entry. */
…
  #define IEM_MC_ENDIF() } while (0); \
-         off = iemNativeEmitEndIf(pReNative, off); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX)
+         off = iemNativeEmitEndIf(pReNative, off)

  /** Emits code related to IEM_MC_ENDIF. */
- DECLINLINE(uint32_t) iemNativeEmitEndIf(PIEMRECOMPILERSTATE pReNative, uint32_t off)
+ DECL_INLINE_THROW(uint32_t) iemNativeEmitEndIf(PIEMRECOMPILERSTATE pReNative, uint32_t off)
  {
      /* Check sanity and get the conditional stack entry. */
…
      /* Finally, check that the host register allocations matches. */
-     AssertMsgReturn(pReNative->Core.bmHstRegs == pOther->bmHstRegs,
-                     ("Core.bmHstRegs=%#x pOther->bmHstRegs=%#x - %#x\n",
-                      pReNative->Core.bmHstRegs, pOther->bmHstRegs, pReNative->Core.bmHstRegs ^ pOther->bmHstRegs),
-                     UINT32_MAX);
+     AssertMsgStmt(pReNative->Core.bmHstRegs == pOther->bmHstRegs,
+                   ("Core.bmHstRegs=%#x pOther->bmHstRegs=%#x - %#x\n",
+                    pReNative->Core.bmHstRegs, pOther->bmHstRegs, pReNative->Core.bmHstRegs ^ pOther->bmHstRegs),
+                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_COND_ENDIF_RECONCILIATION_FAILED));
  }
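For orientation, this is the order in which the conditional helpers above are driven when an IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF triple is recompiled. The fragment is hypothetical glue; the functions, fields, and error codes in it are the real ones from this diff.

PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative); /* throws VERR_IEM_COND_TOO_DEEPLY_NESTED when nested too deep */
/* ...emit the condition test, branching to pEntry->idxLabelElse when false... */
iemNativeCondStartIfBlock(pReNative, off);   /* snapshot register/variable state */
/* ...if-block statements... */
off = iemNativeEmitElse(pReNative, off);     /* jump to the endif label, define the else label */
/* ...else-block statements... */
off = iemNativeEmitEndIf(pReNative, off);    /* define the endif label and reconcile register state;
                                                VERR_IEM_COND_ENDIF_RECONCILIATION_FAILED on mismatch */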
…
  #define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) \
          off = iemNativeEmitIfEflagAnysBitsSet(pReNative, off, (a_fBits)); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  /** Emits code for IEM_MC_IF_EFL_ANY_BITS_SET. */
- DECLINLINE(uint32_t) iemNativeEmitIfEflagAnysBitsSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitsInEfl)
- {
-     PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
-     AssertReturn(pEntry, UINT32_MAX);
+ DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagAnysBitsSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitsInEfl)
+ {
+     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

      /* Get the eflags. */
      uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                                kIemNativeGstRegUse_ReadOnly);
-     AssertReturn(idxEflReg != UINT8_MAX, UINT32_MAX);

      /* Test and jump. */
…
  #define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) \
          off = iemNativeEmitIfEflagNoBitsSet(pReNative, off, (a_fBits)); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  /** Emits code for IEM_MC_IF_EFL_NO_BITS_SET. */
- DECLINLINE(uint32_t) iemNativeEmitIfEflagNoBitsSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitsInEfl)
- {
-     PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
-     AssertReturn(pEntry, UINT32_MAX);
+ DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagNoBitsSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitsInEfl)
+ {
+     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

      /* Get the eflags. */
      uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                                kIemNativeGstRegUse_ReadOnly);
-     AssertReturn(idxEflReg != UINT8_MAX, UINT32_MAX);

      /* Test and jump. */
…
  #define IEM_MC_IF_EFL_BIT_SET(a_fBit) \
          off = iemNativeEmitIfEflagsBitSet(pReNative, off, (a_fBit)); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  /** Emits code for IEM_MC_IF_EFL_BIT_SET. */
- DECLINLINE(uint32_t) iemNativeEmitIfEflagsBitSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl)
- {
-     PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
-     AssertReturn(pEntry, UINT32_MAX);
+ DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagsBitSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl)
+ {
+     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

      /* Get the eflags. */
      uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                                kIemNativeGstRegUse_ReadOnly);
-     AssertReturn(idxEflReg != UINT8_MAX, UINT32_MAX);

      unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
…
  #define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) \
          off = iemNativeEmitIfEflagsBitNotSet(pReNative, off, (a_fBit)); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  /** Emits code for IEM_MC_IF_EFL_BIT_NOT_SET. */
- DECLINLINE(uint32_t) iemNativeEmitIfEflagsBitNotSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl)
- {
-     PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
-     AssertReturn(pEntry, UINT32_MAX);
+ DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagsBitNotSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl)
+ {
+     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

      /* Get the eflags. */
      uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                                kIemNativeGstRegUse_ReadOnly);
-     AssertReturn(idxEflReg != UINT8_MAX, UINT32_MAX);

      unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
…
  #define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
          off = iemNativeEmitIfEflagsTwoBitsEqual(pReNative, off, a_fBit1, a_fBit2, false /*fInverted*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  #define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
          off = iemNativeEmitIfEflagsTwoBitsEqual(pReNative, off, a_fBit1, a_fBit2, true /*fInverted*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  /** Emits code for IEM_MC_IF_EFL_BITS_EQ and IEM_MC_IF_EFL_BITS_NE. */
- DECLINLINE(uint32_t) iemNativeEmitIfEflagsTwoBitsEqual(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                                        uint32_t fBit1InEfl, uint32_t fBit2InEfl, bool fInverted)
- {
-     PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
-     AssertReturn(pEntry, UINT32_MAX);
+ DECL_INLINE_THROW(uint32_t)
+ iemNativeEmitIfEflagsTwoBitsEqual(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                   uint32_t fBit1InEfl, uint32_t fBit2InEfl, bool fInverted)
+ {
+     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

      /* Get the eflags. */
      uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                                kIemNativeGstRegUse_ReadOnly);
-     AssertReturn(idxEflReg != UINT8_MAX, UINT32_MAX);

      unsigned const iBitNo1 = ASMBitFirstSetU32(fBit1InEfl) - 1;
…
  #ifdef RT_ARCH_AMD64
      uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBit1InEfl);
-     AssertReturn(idxTmpReg != UINT8_MAX, UINT32_MAX);

      off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, idxTmpReg, idxEflReg);
…
  #elif defined(RT_ARCH_ARM64)
-     uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
-     AssertReturn(idxTmpReg != UINT8_MAX, UINT32_MAX);
-
-     uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
-     AssertReturn(pu32CodeBuf, UINT32_MAX);
+     uint8_t const    idxTmpReg   = iemNativeRegAllocTmp(pReNative, &off);
+     uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);

      /* and tmpreg, eflreg, #1<<iBitNo1 */
…
  #define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
          off = iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(pReNative, off, a_fBit, a_fBit1, a_fBit2, false /*fInverted*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  #define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
          off = iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(pReNative, off, a_fBit, a_fBit1, a_fBit2, true /*fInverted*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  /** Emits code for IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ and
   *  IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE. */
- DECLINLINE(uint32_t) iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl,
-                                                                    uint32_t fBit1InEfl, uint32_t fBit2InEfl, bool fInverted)
- {
-     PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
-     AssertReturn(pEntry, UINT32_MAX);
+ DECL_INLINE_THROW(uint32_t)
+ iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl,
+                                               uint32_t fBit1InEfl, uint32_t fBit2InEfl, bool fInverted)
+ {
+     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

      /* We need an if-block label for the non-inverted variant. */
      uint32_t const idxLabelIf = fInverted ? iemNativeLabelCreate(pReNative, kIemNativeLabelType_If, UINT32_MAX,
                                                                   pReNative->paLabels[pEntry->idxLabelElse].uData) : UINT32_MAX;
-     AssertReturn(idxLabelIf != UINT32_MAX || !fInverted, UINT32_MAX);

      /* Get the eflags. */
      uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                                kIemNativeGstRegUse_ReadOnly);
-     AssertReturn(idxEflReg != UINT8_MAX, UINT32_MAX);

      /* Translate the flag masks to bit numbers. */
…
      uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
  #endif
-     AssertReturn(idxTmpReg != UINT8_MAX, UINT32_MAX);

      /* Check for the lone bit first. */
…
  #elif defined(RT_ARCH_ARM64)
-     uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
-     AssertReturn(pu32CodeBuf, UINT32_MAX);
+     uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);

      /* and tmpreg, eflreg, #1<<iBitNo1 */
…
  #define IEM_MC_IF_CX_IS_NZ() \
          off = iemNativeEmitIfCxIsNotZero(pReNative, off); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  /** Emits code for IEM_MC_IF_CX_IS_NZ. */
- DECLINLINE(uint32_t) iemNativeEmitIfCxIsNotZero(PIEMRECOMPILERSTATE pReNative, uint32_t off)
- {
-     PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
-     AssertReturn(pEntry, UINT32_MAX);
+ DECL_INLINE_THROW(uint32_t) iemNativeEmitIfCxIsNotZero(PIEMRECOMPILERSTATE pReNative, uint32_t off)
+ {
+     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

      uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
                                                                   (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + X86_GREG_xCX),
                                                                   kIemNativeGstRegUse_ReadOnly);
-     AssertReturn(idxGstRcxReg != UINT8_MAX, UINT32_MAX);
      off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfNoneSet(pReNative, off, idxGstRcxReg, UINT16_MAX, pEntry->idxLabelElse);
      iemNativeRegFreeTmp(pReNative, idxGstRcxReg);
…
  #define IEM_MC_IF_ECX_IS_NZ() \
          off = iemNativeEmitIfRcxEcxIsNotZero(pReNative, off, false /*f64Bit*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  #define IEM_MC_IF_RCX_IS_NZ() \
          off = iemNativeEmitIfRcxEcxIsNotZero(pReNative, off, true /*f64Bit*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  /** Emits code for IEM_MC_IF_ECX_IS_NZ and IEM_MC_IF_RCX_IS_NZ. */
- DECLINLINE(uint32_t) iemNativeEmitIfRcxEcxIsNotZero(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool f64Bit)
- {
-     PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
-     AssertReturn(pEntry, UINT32_MAX);
+ DECL_INLINE_THROW(uint32_t) iemNativeEmitIfRcxEcxIsNotZero(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool f64Bit)
+ {
+     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

      uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
                                                                   (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + X86_GREG_xCX),
                                                                   kIemNativeGstRegUse_ReadOnly);
-     AssertReturn(idxGstRcxReg != UINT8_MAX, UINT32_MAX);
      off = iemNativeEmitTestIfGprIsZeroAndJmpToLabel(pReNative, off, idxGstRcxReg, f64Bit, pEntry->idxLabelElse);
      iemNativeRegFreeTmp(pReNative, idxGstRcxReg);
…
  #define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
          off = iemNativeEmitIfCxIsNotZeroAndTestEflagsBit(pReNative, off, a_fBit, true /*fCheckIfSet*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  #define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
          off = iemNativeEmitIfCxIsNotZeroAndTestEflagsBit(pReNative, off, a_fBit, false /*fCheckIfSet*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  /** Emits code for IEM_MC_IF_CX_IS_NZ. */
- DECLINLINE(uint32_t) iemNativeEmitIfCxIsNotZeroAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                                                 uint32_t fBitInEfl, bool fCheckIfSet)
- {
-     PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
-     AssertReturn(pEntry, UINT32_MAX);
+ DECL_INLINE_THROW(uint32_t)
+ iemNativeEmitIfCxIsNotZeroAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl, bool fCheckIfSet)
+ {
+     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

      /* We have to load both RCX and EFLAGS before we can start branching,
…
         register allocator state.
         Doing EFLAGS first as it's more likely to be loaded, right? */
-     uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                               kIemNativeGstRegUse_ReadOnly);
-     AssertReturn(idxEflReg != UINT8_MAX, UINT32_MAX);
-
+     uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
+                                                                  kIemNativeGstRegUse_ReadOnly);
      uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
                                                                   (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + X86_GREG_xCX),
                                                                   kIemNativeGstRegUse_ReadOnly);
-     AssertReturn(idxGstRcxReg != UINT8_MAX, UINT32_MAX);

      /** @todo we could reduce this to a single branch instruction by spending a
…
  #define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
          off = iemNativeEmitIfRcxEcxIsNotZeroAndTestEflagsBit(pReNative, off, a_fBit, true /*fCheckIfSet*/, false /*f64Bit*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  #define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
          off = iemNativeEmitIfRcxEcxIsNotZeroAndTestEflagsBit(pReNative, off, a_fBit, false /*fCheckIfSet*/, false /*f64Bit*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  #define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
          off = iemNativeEmitIfRcxEcxIsNotZeroAndTestEflagsBit(pReNative, off, a_fBit, true /*fCheckIfSet*/, true /*f64Bit*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {

  #define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
          off = iemNativeEmitIfRcxEcxIsNotZeroAndTestEflagsBit(pReNative, off, a_fBit, false /*fCheckIfSet*/, true /*f64Bit*/); \
-         AssertReturn(off != UINT32_MAX, UINT32_MAX); \
          do {
…
   *  IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET and
   *  IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET. */
- DECLINLINE(uint32_t) iemNativeEmitIfRcxEcxIsNotZeroAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                                                     uint32_t fBitInEfl, bool fCheckIfSet, bool f64Bit)
- {
-     PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
-     AssertReturn(pEntry, UINT32_MAX);
+ DECL_INLINE_THROW(uint32_t)
+ iemNativeEmitIfRcxEcxIsNotZeroAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                uint32_t fBitInEfl, bool fCheckIfSet, bool f64Bit)
+ {
+     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

      /* We have to load both RCX and EFLAGS before we can start branching,
…
         register allocator state.
         Doing EFLAGS first as it's more likely to be loaded, right?
*/ 4767 uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags, 4768 kIemNativeGstRegUse_ReadOnly); 4769 AssertReturn(idxEflReg != UINT8_MAX, UINT32_MAX); 4770 4665 uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags, 4666 kIemNativeGstRegUse_ReadOnly); 4771 4667 uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, 4772 4668 (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + X86_GREG_xCX), 4773 4669 kIemNativeGstRegUse_ReadOnly); 4774 AssertReturn(idxGstRcxReg != UINT8_MAX, UINT32_MAX);4775 4670 4776 4671 /** @todo we could reduce this to a single branch instruction by spending a … … 4800 4695 4801 4696 #define IEM_MC_SUB_GREG_U16(a_iGReg, a_u8SubtrahendConst) \ 4802 off = iemNativeEmitSubGregU16(pReNative, off, a_iGReg, a_u8SubtrahendConst); \ 4803 AssertReturn(off != UINT32_MAX, UINT32_MAX) 4697 off = iemNativeEmitSubGregU16(pReNative, off, a_iGReg, a_u8SubtrahendConst) 4804 4698 4805 4699 /** Emits code for IEM_MC_SUB_GREG_U16. */ … … 4809 4703 (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + iGReg), 4810 4704 kIemNativeGstRegUse_ForUpdate); 4811 AssertReturn(idxGstTmpReg != UINT8_MAX, UINT32_MAX);4812 4705 4813 4706 #ifdef RT_ARCH_AMD64 4814 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4); 4815 AssertReturn(pbCodeBuf, UINT32_MAX); 4707 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4); 4816 4708 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP; 4817 4709 if (idxGstTmpReg >= 8) … … 4831 4723 4832 4724 #else 4833 uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off); 4834 AssertReturn(idxTmpReg != UINT8_MAX, UINT32_MAX); 4835 4836 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2); 4837 AssertReturn(pu32CodeBuf, UINT32_MAX); 4725 uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off); 4726 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2); 4838 4727 4839 4728 /* sub tmp, gstgrp, uSubtrahend */ … … 4856 4745 4857 4746 #define IEM_MC_SUB_GREG_U32(a_iGReg, a_u8Const) \ 4858 off = iemNativeEmitSubGregU32U64(pReNative, off, a_iGReg, a_u8Const, false /*f64Bit*/); \ 4859 AssertReturn(off != UINT32_MAX, UINT32_MAX) 4747 off = iemNativeEmitSubGregU32U64(pReNative, off, a_iGReg, a_u8Const, false /*f64Bit*/) 4860 4748 4861 4749 #define IEM_MC_SUB_GREG_U64(a_iGReg, a_u8Const) \ 4862 off = iemNativeEmitSubGregU32U64(pReNative, off, a_iGReg, a_u8Const, true /*f64Bit*/); \ 4863 AssertReturn(off != UINT32_MAX, UINT32_MAX) 4750 off = iemNativeEmitSubGregU32U64(pReNative, off, a_iGReg, a_u8Const, true /*f64Bit*/) 4864 4751 4865 4752 /** Emits code for IEM_MC_SUB_GREG_U32 and IEM_MC_SUB_GREG_U64. */ 4866 DECL INLINE(uint32_t) iemNativeEmitSubGregU32U64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg,4867 4753 DECL_INLINE_THROW(uint32_t) 4754 iemNativeEmitSubGregU32U64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t uSubtrahend, bool f64Bit) 4868 4755 { 4869 4756 uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, 4870 4757 (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + iGReg), 4871 4758 kIemNativeGstRegUse_ForUpdate); 4872 AssertReturn(idxGstTmpReg != UINT8_MAX, UINT32_MAX);4873 4759 4874 4760 #ifdef RT_ARCH_AMD64 4875 4761 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6); 4876 AssertReturn(pbCodeBuf, UINT32_MAX);4877 4762 if (f64Bit) 4878 4763 pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg >= 8 ? 
X86_OP_REX_B : 0); … … 4904 4789 /* sub tmp, gstgrp, uSubtrahend */ 4905 4790 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 4906 AssertReturn(pu32CodeBuf, UINT32_MAX);4907 4791 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, idxTmpReg, idxGstTmpReg, uSubtrahend, f64Bit); 4908 4792 … … 4945 4829 { 4946 4830 RT_NOREF(pCallEntry); 4947 //pReNative->pInstrBuf[off++] = 0xcc;4948 4831 4949 4832 /* It's too convenient to use iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet below … … 4951 4834 uint32_t const idxLabelVmCheck = iemNativeLabelCreate(pReNative, kIemNativeLabelType_CheckIrq, 4952 4835 UINT32_MAX, pReNative->uCheckIrqSeqNo++); 4953 AssertReturn(idxLabelVmCheck != UINT32_MAX, UINT32_MAX);4954 4836 4955 4837 uint32_t const idxLabelReturnBreak = iemNativeLabelCreate(pReNative, kIemNativeLabelType_ReturnBreak); 4956 AssertReturn(idxLabelReturnBreak != UINT32_MAX, UINT32_MAX);4957 4838 4958 4839 /* Again, we need to load the extended EFLAGS before we actually need them … … 4962 4843 uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags, 4963 4844 kIemNativeGstRegUse_ReadOnly); 4964 AssertReturn(idxEflReg != UINT8_MAX, UINT32_MAX); 4965 4966 uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, 4967 kIemNativeGstRegUse_ReadOnly); 4968 AssertReturn(idxPcReg != UINT8_MAX, UINT32_MAX); 4845 4846 uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ReadOnly); 4969 4847 4970 4848 uint8_t idxTmpReg = iemNativeRegAllocTmp(pReNative, &off); 4971 AssertReturn(idxTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);4972 4849 4973 4850 /* … … 5037 4914 { 5038 4915 uint32_t const fExpectedExec = (uint32_t)pCallEntry->auParams[0]; 5039 uint8_t idxTmpReg = iemNativeRegAllocTmp(pReNative, &off); 5040 5041 AssertReturn(idxTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX); 4916 uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off); 4917 5042 4918 off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, idxTmpReg, RT_UOFFSETOF(VMCPUCC, iem.s.fExec)); 5043 4919 off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxTmpReg, IEMTB_F_KEY_MASK); … … 5087 4963 * @param cbBuf The output buffer size. At least 32 bytes. 5088 4964 */ 5089 const char *iemTbFlagsToString(uint32_t fFlags, char *pszBuf, size_t cbBuf) 4965 DECLHIDDEN(const char *) iemTbFlagsToString(uint32_t fFlags, char *pszBuf, size_t cbBuf) RT_NOEXCEPT 5090 4966 { 5091 4967 Assert(cbBuf >= 32); … … 5173 5049 5174 5050 5175 void iemNativeDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) 5051 DECLHIDDEN(void) iemNativeDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT 5176 5052 { 5177 5053 AssertReturnVoid((pTb->fFlags & IEMTB_F_TYPE_MASK) == IEMTB_F_TYPE_NATIVE); … … 5547 5423 * @param pTb The threaded translation to recompile to native. 5548 5424 */ 5549 PIEMTB iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) 5425 DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT 5550 5426 { 5551 5427 /* … … 5563 5439 5564 5440 /* 5565 * Emit prolog code (fixed). 5566 */ 5567 uint32_t off = iemNativeEmitProlog(pReNative, 0); 5568 AssertReturn(off != UINT32_MAX, pTb); 5569 5570 /* 5571 * Convert the calls to native code. 
5572 */ 5573 #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO 5574 int32_t iGstInstr = -1; 5575 uint32_t fExec = pTb->fFlags; 5576 #endif 5577 PCIEMTHRDEDCALLENTRY pCallEntry = pTb->Thrd.paCalls; 5578 uint32_t cCallsLeft = pTb->Thrd.cCalls; 5441 * Recompiling and emitting code is done using try/throw/catch or setjmp/longjmp 5442 * for aborting if an error happens. 5443 */ 5444 uint32_t cCallsLeft = pTb->Thrd.cCalls; 5579 5445 #ifdef LOG_ENABLED 5580 uint32_t const 5581 #endif 5582 while (cCallsLeft-- > 0)5583 {5584 PFNIEMNATIVERECOMPFUNC const pfnRecom = g_apfnIemNativeRecompileFunctions[pCallEntry->enmFunction];5585 5446 uint32_t const cCallsOrg = cCallsLeft; 5447 #endif 5448 uint32_t off = 0; 5449 int rc = VINF_SUCCESS; 5450 IEMNATIVE_TRY_SETJMP(pReNative, rc) 5451 { 5586 5452 /* 5587 * Debug info and assembly markup. 5453 * Emit prolog code (fixed). 5454 */ 5455 off = iemNativeEmitProlog(pReNative, off); 5456 5457 /* 5458 * Convert the calls to native code. 5588 5459 */ 5589 5460 #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO 5590 if (pCallEntry->enmFunction == kIemThreadedFunc_BltIn_CheckMode) 5591 fExec = pCallEntry->auParams[0]; 5592 iemNativeDbgInfoAddNativeOffset(pReNative, off); 5593 if (iGstInstr < (int32_t)pCallEntry->idxInstr) 5461 int32_t iGstInstr = -1; 5462 uint32_t fExec = pTb->fFlags; 5463 #endif 5464 PCIEMTHRDEDCALLENTRY pCallEntry = pTb->Thrd.paCalls; 5465 while (cCallsLeft-- > 0) 5594 5466 { 5595 if (iGstInstr < (int32_t)pTb->cInstructions) 5596 iemNativeDbgInfoAddGuestInstruction(pReNative, fExec); 5467 PFNIEMNATIVERECOMPFUNC const pfnRecom = g_apfnIemNativeRecompileFunctions[pCallEntry->enmFunction]; 5468 5469 /* 5470 * Debug info and assembly markup. 5471 */ 5472 #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO 5473 if (pCallEntry->enmFunction == kIemThreadedFunc_BltIn_CheckMode) 5474 fExec = pCallEntry->auParams[0]; 5475 iemNativeDbgInfoAddNativeOffset(pReNative, off); 5476 if (iGstInstr < (int32_t)pCallEntry->idxInstr) 5477 { 5478 if (iGstInstr < (int32_t)pTb->cInstructions) 5479 iemNativeDbgInfoAddGuestInstruction(pReNative, fExec); 5480 else 5481 Assert(iGstInstr == pTb->cInstructions); 5482 iGstInstr = pCallEntry->idxInstr; 5483 } 5484 iemNativeDbgInfoAddThreadedCall(pReNative, (IEMTHREADEDFUNCS)pCallEntry->enmFunction, pfnRecom != NULL); 5485 #elif defined(VBOX_STRICT) 5486 off = iemNativeEmitMarker(pReNative, off, 5487 RT_MAKE_U32((pTb->Thrd.cCalls - cCallsLeft - 1) | (pfnRecom ? 0x8000 : 0), 5488 pCallEntry->enmFunction)); 5489 #endif 5490 5491 /* 5492 * Actual work. 5493 */ 5494 if (pfnRecom) /** @todo stats on this. */ 5495 { 5496 //STAM_COUNTER_INC() 5497 off = pfnRecom(pReNative, off, pCallEntry); 5498 } 5597 5499 else 5598 Assert(iGstInstr == pTb->cInstructions); 5599 iGstInstr = pCallEntry->idxInstr; 5500 off = iemNativeEmitThreadedCall(pReNative, off, pCallEntry); 5501 Assert(off <= pReNative->cInstrBufAlloc); 5502 Assert(pReNative->cCondDepth == 0); 5503 5504 /* 5505 * Advance. 5506 */ 5507 pCallEntry++; 5600 5508 } 5601 iemNativeDbgInfoAddThreadedCall(pReNative, (IEMTHREADEDFUNCS)pCallEntry->enmFunction, pfnRecom != NULL);5602 #elif defined(VBOX_STRICT)5603 off = iemNativeEmitMarker(pReNative, off,5604 RT_MAKE_U32((pTb->Thrd.cCalls - cCallsLeft - 1) | (pfnRecom ? 0x8000 : 0),5605 pCallEntry->enmFunction));5606 AssertReturn(off != UINT32_MAX, pTb);5607 #endif5608 5509 5609 5510 /* 5610 * Actual work.5511 * Emit the epilog code. 5611 5512 */ 5612 if (pfnRecom) /** @todo stats on this. 
*/ 5613 { 5614 //STAM_COUNTER_INC() 5615 off = pfnRecom(pReNative, off, pCallEntry); 5616 } 5617 else 5618 off = iemNativeEmitThreadedCall(pReNative, off, pCallEntry); 5619 AssertReturn(off != UINT32_MAX, pTb); 5620 Assert(pReNative->cCondDepth == 0); 5513 uint32_t idxReturnLabel; 5514 off = iemNativeEmitEpilog(pReNative, off, &idxReturnLabel); 5621 5515 5622 5516 /* 5623 * Advance.5517 * Generate special jump labels. 5624 5518 */ 5625 pCallEntry++; 5519 if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_ReturnBreak)) 5520 off = iemNativeEmitReturnBreak(pReNative, off, idxReturnLabel); 5521 if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_RaiseGp0)) 5522 off = iemNativeEmitRaiseGp0(pReNative, off, idxReturnLabel); 5626 5523 } 5627 5628 /* 5629 * Emit the epilog code. 5630 */ 5631 uint32_t idxReturnLabel; 5632 off = iemNativeEmitEpilog(pReNative, off, &idxReturnLabel); 5633 AssertReturn(off != UINT32_MAX, pTb); 5634 5635 /* 5636 * Generate special jump labels. 5637 */ 5638 if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_ReturnBreak)) 5639 { 5640 off = iemNativeEmitReturnBreak(pReNative, off, idxReturnLabel); 5641 AssertReturn(off != UINT32_MAX, pTb); 5524 IEMNATIVE_CATCH_LONGJMP_BEGIN(pReNative, rc); 5525 { 5526 Log(("iemNativeRecompile: Caught %Rrc while recompiling!\n", rc)); 5527 return pTb; 5642 5528 } 5643 if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_RaiseGp0)) 5644 { 5645 off = iemNativeEmitRaiseGp0(pReNative, off, idxReturnLabel); 5646 AssertReturn(off != UINT32_MAX, pTb); 5647 } 5529 IEMNATIVE_CATCH_LONGJMP_END(pReNative); 5530 Assert(off <= pReNative->cInstrBufAlloc); 5648 5531 5649 5532 /* -
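The IEMAllN8veRecompiler.cpp changes above all follow one pattern: the per-call AssertReturn(off != UINT32_MAX, ...) and AssertReturn(ptr, UINT32_MAX) checks are dropped, and the emitters instead abort through the new IEMNATIVE_TRY_SETJMP / IEMNATIVE_DO_LONGJMP machinery (defined in IEMN8veRecompiler.h below), with iemNativeRecompile catching the failure and returning the threaded TB unchanged. A minimal standalone sketch of that control flow, using plain setjmp/longjmp and a trimmed-down stand-in state; RECOMPSTATE, emitSomething and recompile are illustrative names only, and the real macros compile to C++ try/throw instead when IEM_WITH_THROW_CATCH is defined:

#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed-down stand-in for IEMRECOMPILERSTATE; only the jump buffer matters here. */
typedef struct RECOMPSTATE
{
    jmp_buf JmpBuf;
} RECOMPSTATE;

/* An emitter that hits a hard error aborts with longjmp (cf. IEMNATIVE_DO_LONGJMP)
   instead of returning UINT32_MAX, so the caller needs no per-call AssertReturn. */
static uint32_t emitSomething(RECOMPSTATE *pState, uint32_t off, int fFail)
{
    if (fFail)
        longjmp(pState->JmpBuf, -1 /* negative VBox status code */);
    return off + 1; /* the advanced code buffer offset */
}

static int recompile(int fFail)
{
    RECOMPSTATE       State;
    volatile uint32_t off = 0;          /* modified between setjmp and longjmp */
    int               rc  = setjmp(State.JmpBuf);   /* IEMNATIVE_TRY_SETJMP */
    if (rc == 0)
    {
        off = emitSomething(&State, off, 0);
        off = emitSomething(&State, off, fFail);    /* no status checks needed */
        return 0;
    }
    /* IEMNATIVE_CATCH_LONGJMP_BEGIN .. IEMNATIVE_CATCH_LONGJMP_END */
    printf("recompile: caught rc=%d at off=%u\n", rc, (unsigned)off);
    return rc;
}

int main(void)
{
    recompile(0);
    return recompile(1) != 0 ? 0 : 1;
}

The payoff is visible throughout the diff: every emitter call site loses a status check, and the single catch block in iemNativeRecompile handles all failure paths.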
trunk/src/VBox/VMM/include/IEMInternal.h
r101640 r101682 1845 1845 # define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \ 1846 1846 jmp_buf JmpBuf; \ 1847 jmp_buf * volatile pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf); \1848 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \1847 jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \ 1848 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \ 1849 1849 if ((rcStrict = setjmp(JmpBuf)) == 0) 1850 1850 # define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \ 1851 pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf); \1852 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \1851 pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \ 1852 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \ 1853 1853 if ((rcStrict = setjmp(JmpBuf)) == 0) 1854 1854 # define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \ … … 5590 5590 5591 5591 /* Native recompiler public bits: */ 5592 PIEMTB iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb);5593 int iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk);5594 void iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb);5592 DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT; 5593 int iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk); 5594 void iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb); 5595 5595 5596 5596 -
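The IEMInternal.h hunk is a small macro-hygiene fix: IEM_TRY_SETJMP and IEM_TRY_SETJMP_AGAIN expanded the hard-coded identifier pVCpu instead of their a_pVCpu parameter, so they only compiled where a variable of exactly that name was in scope and silently ignored whatever expression the caller actually passed. A contrived sketch of the failure mode; the DEMO_* macros and VCPUDEMO type are hypothetical, for illustration only:

#include <stdio.h>

typedef struct VCPUDEMO { int iState; } VCPUDEMO;

/* Broken variant: expands the hard-coded name 'pVCpu' and ignores its
   parameter, so it only compiles where a 'pVCpu' happens to be in scope. */
#define DEMO_TOUCH_BROKEN(a_pVCpu)  do { pVCpu->iState = 1; } while (0)

/* Fixed variant: uses and parenthesizes the parameter, as r101682 does
   for IEM_TRY_SETJMP / IEM_TRY_SETJMP_AGAIN. */
#define DEMO_TOUCH_FIXED(a_pVCpu)   do { (a_pVCpu)->iState = 1; } while (0)

int main(void)
{
    VCPUDEMO  Vcpu       = { 0 };
    VCPUDEMO *pOtherName = &Vcpu;
#if 0
    DEMO_TOUCH_BROKEN(pOtherName);  /* error: 'pVCpu' undeclared */
#endif
    DEMO_TOUCH_FIXED(pOtherName);   /* works with any pointer expression */
    printf("iState=%d\n", Vcpu.iState);
    return 0;
}

Parenthesizing the expansion as (a_pVCpu)-> additionally keeps operator precedence correct when the argument is a non-trivial expression.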
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
r101661 r101682 621 621 /** The condition nesting stack. */ 622 622 IEMNATIVECOND aCondStack[2]; 623 624 #ifndef IEM_WITH_THROW_CATCH 625 /** Pointer to the setjmp/longjmp buffer if we're not using C++ exceptions 626 * for recompilation error handling. */ 627 jmp_buf JmpBuf; 628 #endif 623 629 } IEMRECOMPILERSTATE; 624 630 /** Pointer to a native recompiler state. */ … … 626 632 627 633 634 /** @def IEMNATIVE_TRY_SETJMP 635 * Wrapper around setjmp / try, hiding all the ugly differences. 636 * 637 * @note Use with extreme care as this is a fragile macro. 638 * @param a_pReNative The native recompile state. 639 * @param a_rcTarget The variable that should receive the status code in case 640 * of a longjmp/throw. 641 */ 642 /** @def IEMNATIVE_CATCH_LONGJMP_BEGIN 643 * Start wrapper for catch / setjmp-else. 644 * 645 * This will set up a scope. 646 * 647 * @note Use with extreme care as this is a fragile macro. 648 * @param a_pReNative The native recompile state. 649 * @param a_rcTarget The variable that should receive the status code in case 650 * of a longjmp/throw. 651 */ 652 /** @def IEMNATIVE_CATCH_LONGJMP_END 653 * End wrapper for catch / setjmp-else. 654 * 655 * This will close the scope set up by IEMNATIVE_CATCH_LONGJMP_BEGIN and clean 656 * up the state. 657 * 658 * @note Use with extreme care as this is a fragile macro. 659 * @param a_pReNative The native recompile state. 660 */ 661 /** @def IEMNATIVE_DO_LONGJMP 662 * 663 * Wrapper around longjmp / throw. 664 * 665 * @param a_pReNative The native recompile state. 666 * @param a_rc The status code jump back with / throw. 667 */ 668 #ifdef IEM_WITH_THROW_CATCH 669 # define IEMNATIVE_TRY_SETJMP(a_pReNative, a_rcTarget) \ 670 a_rcTarget = VINF_SUCCESS; \ 671 try 672 # define IEMNATIVE_CATCH_LONGJMP_BEGIN(a_pReNative, a_rcTarget) \ 673 catch (int rcThrown) \ 674 { \ 675 a_rcTarget = rcThrown 676 # define IEMNATIVE_CATCH_LONGJMP_END(a_pReNative) \ 677 } \ 678 ((void)0) 679 # define IEMNATIVE_DO_LONGJMP(a_pReNative, a_rc) throw int(a_rc) 680 #else /* !IEM_WITH_THROW_CATCH */ 681 # define IEMNATIVE_TRY_SETJMP(a_pReNative, a_rcTarget) \ 682 if ((a_rcTarget = setjmp((a_pReNative)->JmpBuf)) == 0) 683 # define IEMNATIVE_CATCH_LONGJMP_BEGIN(a_pReNative, a_rcTarget) \ 684 else \ 685 { \ 686 ((void)0) 687 # define IEMNATIVE_CATCH_LONGJMP_END(a_pReNative) \ 688 } 689 # define IEMNATIVE_DO_LONGJMP(a_pReNative, a_rc) longjmp((a_pReNative)->JmpBuf, (a_rc)) 690 #endif /* !IEM_WITH_THROW_CATCH */ 691 692 628 693 /** 629 694 * Native recompiler worker for a threaded function. 630 695 * 631 * @returns New code buffer offset , UINT32_MAX in case offailure.696 * @returns New code buffer offset; throws VBox status code in case of a failure. 632 697 * @param pReNative The native recompiler state. 633 698 * @param off The current code buffer offset. 634 699 * @param pCallEntry The threaded call entry. 635 700 * 636 * @note This is not allowed to throw anything atm. 637 */ 638 typedef DECLCALLBACKTYPE(uint32_t, FNIEMNATIVERECOMPFUNC,(PIEMRECOMPILERSTATE pReNative, uint32_t off, 639 PCIEMTHRDEDCALLENTRY pCallEntry)); 701 * @note This may throw/longjmp VBox status codes (int) to abort compilation, so no RT_NOEXCEPT! 702 */ 703 typedef uint32_t (VBOXCALL FNIEMNATIVERECOMPFUNC)(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry); 640 704 /** Pointer to a native recompiler worker for a threaded function. 
*/ 641 705 typedef FNIEMNATIVERECOMPFUNC *PFNIEMNATIVERECOMPFUNC; 642 706 643 /** Defines a native recompiler worker for a threaded function. */ 707 /** Defines a native recompiler worker for a threaded function. 708 * @see FNIEMNATIVERECOMPFUNC */ 644 709 #define IEM_DECL_IEMNATIVERECOMPFUNC_DEF(a_Name) \ 645 DECLCALLBACK(uint32_t) a_Name(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry) 646 /** Prototypes a native recompiler function for a threaded function. */ 710 uint32_t VBOXCALL a_Name(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry) 711 712 /** Prototypes a native recompiler function for a threaded function. 713 * @see FNIEMNATIVERECOMPFUNC */ 647 714 #define IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(a_Name) FNIEMNATIVERECOMPFUNC a_Name 648 715 649 DECLHIDDEN(uint32_t) iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, 650 uint32_t offWhere = UINT32_MAX, uint16_t uData = 0) RT_NOEXCEPT; 651 DECLHIDDEN(void) iemNativeLabelDefine(PIEMRECOMPILERSTATE pReNative, uint32_t idxLabel, uint32_t offWhere) RT_NOEXCEPT; 652 DECLHIDDEN(bool) iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel, 653 IEMNATIVEFIXUPTYPE enmType, int8_t offAddend = 0) RT_NOEXCEPT; 654 DECLHIDDEN(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, 655 uint32_t cInstrReq) RT_NOEXCEPT; 656 657 DECLHIDDEN(uint8_t) iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, 658 bool fPreferVolatile = true) RT_NOEXCEPT; 659 DECLHIDDEN(uint8_t) iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm, 660 bool fPreferVolatile = true) RT_NOEXCEPT; 661 DECLHIDDEN(uint8_t) iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, 662 IEMNATIVEGSTREG enmGstReg, 663 IEMNATIVEGSTREGUSE enmIntendedUse) RT_NOEXCEPT; 664 DECLHIDDEN(uint8_t) iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, 665 IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT; 666 667 DECLHIDDEN(uint8_t) iemNativeRegAllocVar(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t idxVar) RT_NOEXCEPT; 668 DECLHIDDEN(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs) RT_NOEXCEPT; 669 DECLHIDDEN(uint8_t) iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT; 716 DECL_HIDDEN_THROW(uint32_t) iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, 717 uint32_t offWhere = UINT32_MAX, uint16_t uData = 0); 718 DECL_HIDDEN_THROW(void) iemNativeLabelDefine(PIEMRECOMPILERSTATE pReNative, uint32_t idxLabel, uint32_t offWhere); 719 DECL_HIDDEN_THROW(void) iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel, 720 IEMNATIVEFIXUPTYPE enmType, int8_t offAddend = 0); 721 DECL_HIDDEN_THROW(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq); 722 723 DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile = true); 724 DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm, 725 bool fPreferVolatile = true); 726 DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, 727 IEMNATIVEGSTREG enmGstReg, IEMNATIVEGSTREGUSE enmIntendedUse); 728 DECL_HIDDEN_THROW(uint8_t) 
iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, 729 IEMNATIVEGSTREG enmGstReg); 730 731 DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocVar(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t idxVar); 732 DECL_HIDDEN_THROW(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs); 733 DECL_HIDDEN_THROW(uint8_t) iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg); 670 734 DECLHIDDEN(void) iemNativeRegFree(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT; 671 735 DECLHIDDEN(void) iemNativeRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT; 672 736 DECLHIDDEN(void) iemNativeRegFreeTmpImm(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT; 673 737 DECLHIDDEN(void) iemNativeRegFreeAndFlushMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegMask) RT_NOEXCEPT; 674 DECLHIDDEN(uint32_t) iemNativeRegFlushPendingWrites(PIEMRECOMPILERSTATE pReNative, uint32_t off) RT_NOEXCEPT; 675 676 DECLHIDDEN(uint32_t) iemNativeEmitLoadGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, 677 uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT; 678 DECLHIDDEN(uint32_t) iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off, 679 uint8_t idxInstr) RT_NOEXCEPT; 738 DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushPendingWrites(PIEMRECOMPILERSTATE pReNative, uint32_t off); 739 740 DECL_HIDDEN_THROW(uint32_t) iemNativeEmitLoadGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, 741 uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg); 742 DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr); 680 743 681 744 … … 688 751 * allocation size. 689 752 * 690 * @returns Pointer to the instruction output buffer on success , NULL on691 * failure.753 * @returns Pointer to the instruction output buffer on success; throws VBox 754 * status code on failure, so no need to check it. 692 755 * @param pReNative The native recompile state. 693 756 * @param off Current instruction offset. Works safely for UINT32_MAX … … 696 759 * overestimate this a bit. 697 760 */ 698 DECL_FORCE_INLINE(PIEMNATIVEINSTR) iemNativeInstrBufEnsure(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq) 699 { 700 uint64_t const offChecked = off + (uint64_t)cInstrReq; 761 DECL_FORCE_INLINE_THROW(PIEMNATIVEINSTR) 762 iemNativeInstrBufEnsure(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq) 763 { 764 uint64_t const offChecked = off + (uint64_t)cInstrReq; /** @todo may reconsider the need for UINT32_MAX safety... */ 701 765 if (RT_LIKELY(offChecked <= pReNative->cInstrBufAlloc)) 702 766 { … … 721 785 * in the disassembly. 722 786 */ 723 DECLINLINE(uint32_t) iemNativeEmitMarker(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t uInfo) 787 DECL_INLINE_THROW(uint32_t) 788 iemNativeEmitMarker(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t uInfo) 724 789 { 725 790 #ifdef RT_ARCH_AMD64 726 791 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 727 AssertReturn(pbCodeBuf, UINT32_MAX);728 792 if (uInfo == 0) 729 793 { … … 743 807 } 744 808 #elif RT_ARCH_ARM64 809 /* nop */ 745 810 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 746 AssertReturn(pu32CodeBuf, UINT32_MAX);747 /* nop */748 811 pu32CodeBuf[off++] = 0xd503201f; 749 812 … … 764 827 * Emits setting a GPR to zero. 
765 828 */ 766 DECLINLINE(uint32_t) iemNativeEmitGprZero(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr) 767 { 768 #ifdef RT_ARCH_AMD64 829 DECL_INLINE_THROW(uint32_t) 830 iemNativeEmitGprZero(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr) 831 { 832 #ifdef RT_ARCH_AMD64 833 /* xor gpr32, gpr32 */ 769 834 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 770 AssertReturn(pbCodeBuf, UINT32_MAX);771 /* xor gpr32, gpr32 */772 835 if (iGpr >= 8) 773 836 pbCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B; … … 776 839 777 840 #elif RT_ARCH_ARM64 841 /* mov gpr, #0x0 */ 778 842 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 779 AssertReturn(pu32CodeBuf, UINT32_MAX);780 /* mov gpr, #0x0 */781 843 pu32CodeBuf[off++] = UINT32_C(0xd2800000) | iGpr; 782 844 … … 792 854 * Emits loading a constant into a 64-bit GPR 793 855 */ 794 DECLINLINE(uint32_t) iemNativeEmitLoadGprImm64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint64_t uImm64) 856 DECL_INLINE_THROW(uint32_t) 857 iemNativeEmitLoadGprImm64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint64_t uImm64) 795 858 { 796 859 if (!uImm64) … … 802 865 /* mov gpr, imm32 */ 803 866 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6); 804 AssertReturn(pbCodeBuf, UINT32_MAX);805 867 if (iGpr >= 8) 806 868 pbCodeBuf[off++] = X86_OP_REX_B; … … 815 877 /* mov gpr, imm64 */ 816 878 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10); 817 AssertReturn(pbCodeBuf, UINT32_MAX);818 879 if (iGpr < 8) 819 880 pbCodeBuf[off++] = X86_OP_REX_W; … … 833 894 #elif RT_ARCH_ARM64 834 895 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4); 835 AssertReturn(pu32CodeBuf, UINT32_MAX);836 896 837 897 /* … … 891 951 * only the ARM64 version does that. 892 952 */ 893 DECLINLINE(uint32_t) iemNativeEmitLoadGpr8Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint8_t uImm8) 953 DECL_INLINE_THROW(uint32_t) 954 iemNativeEmitLoadGpr8Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint8_t uImm8) 894 955 { 895 956 #ifdef RT_ARCH_AMD64 896 957 /* mov gpr, imm8 */ 897 958 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 898 AssertReturn(pbCodeBuf, UINT32_MAX);899 959 if (iGpr >= 8) 900 960 pbCodeBuf[off++] = X86_OP_REX_B; … … 907 967 /* movz gpr, imm16, lsl #0 */ 908 968 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 909 AssertReturn(pu32CodeBuf, UINT32_MAX);910 969 pu32CodeBuf[off++] = UINT32_C(0xd2800000) | (UINT32_C(0) << 21) | ((uint32_t)uImm8 << 5) | iGpr; 911 970 … … 922 981 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends. 923 982 */ 924 DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByVCpuDisp(uint8_t *pbCodeBuf, uint32_t off, uint8_t iGprReg, uint32_t offVCpu) 983 DECL_FORCE_INLINE(uint32_t) 984 iemNativeEmitGprByVCpuDisp(uint8_t *pbCodeBuf, uint32_t off, uint8_t iGprReg, uint32_t offVCpu) 925 985 { 926 986 if (offVCpu < 128) … … 943 1003 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends. 
944 1004 */ 945 DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByVCpuLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg, 946 uint32_t offVCpu, ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData) 1005 DECL_FORCE_INLINE_THROW(uint32_t) 1006 iemNativeEmitGprByVCpuLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg, 1007 uint32_t offVCpu, ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData) 947 1008 { 948 1009 /* … … 955 1016 /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */ 956 1017 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 957 AssertReturn(pu32CodeBuf, UINT32_MAX);958 1018 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGpr, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData); 959 1019 } … … 961 1021 { 962 1022 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 963 AssertReturn(pu32CodeBuf, UINT32_MAX);964 1023 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGpr, IEMNATIVE_REG_FIXED_PCPUMCTX, 965 1024 (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData); … … 973 1032 974 1033 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 975 AssertReturn(pu32CodeBuf, UINT32_MAX);976 1034 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGpr, IEMNATIVE_REG_FIXED_PVMCPU, IEMNATIVE_REG_FIXED_TMP); 977 1035 } … … 985 1043 * Emits a 64-bit GPR load of a VCpu value. 986 1044 */ 987 DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1045 DECL_INLINE_THROW(uint32_t) 1046 iemNativeEmitLoadGprFromVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 988 1047 { 989 1048 #ifdef RT_ARCH_AMD64 990 1049 /* mov reg64, mem64 */ 991 1050 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 992 AssertReturn(pbCodeBuf, UINT32_MAX);993 1051 if (iGpr < 8) 994 1052 pbCodeBuf[off++] = X86_OP_REX_W; … … 1013 1071 * @note Bits 32 thru 63 in the GPR will be zero after the operation. 1014 1072 */ 1015 DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1073 DECL_INLINE_THROW(uint32_t) 1074 iemNativeEmitLoadGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1016 1075 { 1017 1076 #ifdef RT_ARCH_AMD64 1018 1077 /* mov reg32, mem32 */ 1019 1078 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1020 AssertReturn(pbCodeBuf, UINT32_MAX);1021 1079 if (iGpr >= 8) 1022 1080 pbCodeBuf[off++] = X86_OP_REX_R; … … 1039 1097 * @note Bits 16 thru 63 in the GPR will be zero after the operation. 1040 1098 */ 1041 DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1099 DECL_INLINE_THROW(uint32_t) 1100 iemNativeEmitLoadGprFromVCpuU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1042 1101 { 1043 1102 #ifdef RT_ARCH_AMD64 1044 1103 /* movzx reg32, mem16 */ 1045 1104 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8); 1046 AssertReturn(pbCodeBuf, UINT32_MAX);1047 1105 if (iGpr >= 8) 1048 1106 pbCodeBuf[off++] = X86_OP_REX_R; … … 1066 1124 * @note Bits 8 thru 63 in the GPR will be zero after the operation. 
1067 1125 */ 1068 DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1126 DECL_INLINE_THROW(uint32_t) 1127 iemNativeEmitLoadGprFromVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1069 1128 { 1070 1129 #ifdef RT_ARCH_AMD64 1071 1130 /* movzx reg32, mem8 */ 1072 1131 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8); 1073 AssertReturn(pbCodeBuf, UINT32_MAX);1074 1132 if (iGpr >= 8) 1075 1133 pbCodeBuf[off++] = X86_OP_REX_R; … … 1092 1150 * Emits a store of a GPR value to a 64-bit VCpu field. 1093 1151 */ 1094 DECLINLINE(uint32_t) iemNativeEmitStoreGprToVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1152 DECL_INLINE_THROW(uint32_t) 1153 iemNativeEmitStoreGprToVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1095 1154 { 1096 1155 #ifdef RT_ARCH_AMD64 1097 1156 /* mov mem64, reg64 */ 1098 1157 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1099 AssertReturn(pbCodeBuf, UINT32_MAX);1100 1158 if (iGpr < 8) 1101 1159 pbCodeBuf[off++] = X86_OP_REX_W; … … 1119 1177 * Emits a store of a GPR value to a 32-bit VCpu field. 1120 1178 */ 1121 DECLINLINE(uint32_t) iemNativeEmitStoreGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1179 DECL_INLINE_THROW(uint32_t) 1180 iemNativeEmitStoreGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1122 1181 { 1123 1182 #ifdef RT_ARCH_AMD64 1124 1183 /* mov mem32, reg32 */ 1125 1184 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1126 AssertReturn(pbCodeBuf, UINT32_MAX);1127 1185 if (iGpr >= 8) 1128 1186 pbCodeBuf[off++] = X86_OP_REX_R; … … 1144 1202 * Emits a store of a GPR value to a 16-bit VCpu field. 1145 1203 */ 1146 DECLINLINE(uint32_t) iemNativeEmitStoreGprFromVCpuU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1204 DECL_INLINE_THROW(uint32_t) 1205 iemNativeEmitStoreGprFromVCpuU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1147 1206 { 1148 1207 #ifdef RT_ARCH_AMD64 1149 1208 /* mov mem16, reg16 */ 1150 1209 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8); 1151 AssertReturn(pbCodeBuf, UINT32_MAX);1152 1210 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP; 1153 1211 if (iGpr >= 8) … … 1170 1228 * Emits a store of a GPR value to a 8-bit VCpu field. 1171 1229 */ 1172 DECLINLINE(uint32_t) iemNativeEmitStoreGprFromVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1230 DECL_INLINE_THROW(uint32_t) 1231 iemNativeEmitStoreGprFromVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 1173 1232 { 1174 1233 #ifdef RT_ARCH_AMD64 1175 1234 /* mov mem8, reg8 */ 1176 1235 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1177 AssertReturn(pbCodeBuf, UINT32_MAX);1178 1236 if (iGpr >= 8) 1179 1237 pbCodeBuf[off++] = X86_OP_REX_R; … … 1195 1253 * Emits a gprdst = gprsrc load. 
1196 1254 */ 1197 DECLINLINE(uint32_t) iemNativeEmitLoadGprFromGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc) 1255 DECL_INLINE_THROW(uint32_t) 1256 iemNativeEmitLoadGprFromGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc) 1198 1257 { 1199 1258 #ifdef RT_ARCH_AMD64 1200 1259 /* mov gprdst, gprsrc */ 1201 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 1202 AssertReturn(pbCodeBuf, UINT32_MAX); 1260 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 1203 1261 if ((iGprDst | iGprSrc) >= 8) 1204 1262 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W | X86_OP_REX_B … … 1211 1269 1212 1270 #elif RT_ARCH_ARM64 1271 /* mov dst, src; alias for: orr dst, xzr, src */ 1213 1272 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1214 AssertReturn(pu32CodeBuf, UINT32_MAX);1215 /* mov dst, src; alias for: orr dst, xzr, src */1216 1273 pu32CodeBuf[off++] = UINT32_C(0xaa000000) | ((uint32_t)iGprSrc << 16) | ((uint32_t)ARMV8_A64_REG_XZR << 5) | iGprDst; 1217 1274 … … 1253 1310 * Emits a 64-bit GRP load instruction with an BP relative source address. 1254 1311 */ 1255 DECLINLINE(uint32_t) iemNativeEmitLoadGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp) 1312 DECL_INLINE_THROW(uint32_t) 1313 iemNativeEmitLoadGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp) 1256 1314 { 1257 1315 /* mov gprdst, qword [rbp + offDisp] */ 1258 1316 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1259 AssertReturn(pbCodeBuf, UINT32_MAX);1260 1317 if (iGprDst < 8) 1261 1318 pbCodeBuf[off++] = X86_OP_REX_W; … … 1272 1329 * Emits a 32-bit GRP load instruction with an BP relative source address. 1273 1330 */ 1274 DECLINLINE(uint32_t) iemNativeEmitLoadGprByBpU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp) 1331 DECL_INLINE_THROW(uint32_t) 1332 iemNativeEmitLoadGprByBpU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp) 1275 1333 { 1276 1334 /* mov gprdst, dword [rbp + offDisp] */ 1277 1335 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1278 AssertReturn(pbCodeBuf, UINT32_MAX);1279 1336 if (iGprDst >= 8) 1280 1337 pbCodeBuf[off++] = X86_OP_REX_R; … … 1289 1346 * Emits a load effective address to a GRP with an BP relative source address. 1290 1347 */ 1291 DECLINLINE(uint32_t) iemNativeEmitLeaGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp) 1348 DECL_INLINE_THROW(uint32_t) 1349 iemNativeEmitLeaGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp) 1292 1350 { 1293 1351 /* lea gprdst, [rbp + offDisp] */ 1294 1352 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1295 AssertReturn(pbCodeBuf, UINT32_MAX);1296 1353 if (iGprDst < 8) 1297 1354 pbCodeBuf[off++] = X86_OP_REX_W; … … 1309 1366 * @note May trash IEMNATIVE_REG_FIXED_TMP0. 
1310 1367 */ 1311 DECLINLINE(uint32_t) iemNativeEmitStoreGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint8_t iGprSrc) 1368 DECL_INLINE_THROW(uint32_t) 1369 iemNativeEmitStoreGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint8_t iGprSrc) 1312 1370 { 1313 1371 #ifdef RT_ARCH_AMD64 1314 1372 /* mov qword [rbp + offDisp], gprdst */ 1315 1373 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1316 AssertReturn(pbCodeBuf, UINT32_MAX);1317 1374 if (iGprSrc < 8) 1318 1375 pbCodeBuf[off++] = X86_OP_REX_W; … … 1327 1384 /* str w/ unsigned imm12 (scaled) */ 1328 1385 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1329 AssertReturn(pu32CodeBuf, UINT32_MAX);1330 1386 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Dword, iGprSrc, 1331 1387 ARMV8_A64_REG_BP, (uint32_t)offDisp / 8); … … 1335 1391 /* stur w/ signed imm9 (unscaled) */ 1336 1392 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1337 AssertReturn(pu32CodeBuf, UINT32_MAX);1338 1393 pu32CodeBuf[off++] = Armv8A64MkInstrSturLdur(kArmv8A64InstrLdStType_St_Dword, iGprSrc, ARMV8_A64_REG_BP, offDisp); 1339 1394 } … … 1343 1398 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, (uint32_t)offDisp); 1344 1399 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1345 AssertReturn(pu32CodeBuf, UINT32_MAX);1346 1400 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Dword, iGprSrc, ARMV8_A64_REG_BP, 1347 1401 IEMNATIVE_REG_FIXED_TMP0, kArmv8A64InstrLdStExtend_Sxtw); … … 1361 1415 * @note May trash IEMNATIVE_REG_FIXED_TMP0. 1362 1416 */ 1363 DECLINLINE(uint32_t) iemNativeEmitStoreImm64ByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint64_t uImm64) 1417 DECL_INLINE_THROW(uint32_t) 1418 iemNativeEmitStoreImm64ByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint64_t uImm64) 1364 1419 { 1365 1420 #ifdef RT_ARCH_AMD64 … … 1367 1422 { 1368 1423 /* mov qword [rbp + offDisp], imm32 - sign extended */ 1369 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 11); 1370 AssertReturn(pbCodeBuf, UINT32_MAX); 1371 1424 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 11); 1372 1425 pbCodeBuf[off++] = X86_OP_REX_W; 1373 1426 pbCodeBuf[off++] = 0xc7; … … 1404 1457 * Common bit of iemNativeEmitLoadGprByGpr and friends. 1405 1458 */ 1406 DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByGprDisp(uint8_t *pbCodeBuf, uint32_t off,1407 1459 DECL_FORCE_INLINE(uint32_t) 1460 iemNativeEmitGprByGprDisp(uint8_t *pbCodeBuf, uint32_t off, uint8_t iGprReg, uint8_t iGprBase, int32_t offDisp) 1408 1461 { 1409 1462 if (offDisp == 0 && (iGprBase & 7) != X86_GREG_xBP) /* Can use encoding w/o displacement field. */ … … 1436 1489 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends. 1437 1490 */ 1438 DECL_FORCE_INLINE (uint32_t) iemNativeEmitGprByGprLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg,1439 uint8_t iGprBase, int32_t offDisp,1440 1491 DECL_FORCE_INLINE_THROW(uint32_t) 1492 iemNativeEmitGprByGprLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg, 1493 uint8_t iGprBase, int32_t offDisp, ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData) 1441 1494 { 1442 1495 /* … … 1449 1502 /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. 
*/ 1450 1503 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1451 AssertReturn(pu32CodeBuf, UINT32_MAX);1452 1504 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, iGprBase, (uint32_t)offDisp / cbData); 1453 1505 } … … 1458 1510 /** @todo reduce by offVCpu by >> 3 or >> 2? if it saves instructions? */ 1459 1511 uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, off, (uint64)offDisp); 1460 AssertReturn(idxTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);1461 1512 1462 1513 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1463 AssertReturn(pu32CodeBuf, UINT32_MAX);1464 1514 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, iGprBase, idxTmpReg); 1465 1515 … … 1475 1525 * Emits a 64-bit GPR load via a GPR base address with a displacement. 1476 1526 */ 1477 DECL INLINE(uint32_t) iemNativeEmitLoadGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,1478 1527 DECL_INLINE_THROW(uint32_t) 1528 iemNativeEmitLoadGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprBase, int32_t offDisp) 1479 1529 { 1480 1530 #ifdef RT_ARCH_AMD64 1481 1531 /* mov reg64, mem64 */ 1482 1532 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8); 1483 AssertReturn(pbCodeBuf, UINT32_MAX);1484 1533 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B); 1485 1534 pbCodeBuf[off++] = 0x8b; … … 1501 1550 * @note Bits 63 thru 32 in @a iGprDst will be cleared. 1502 1551 */ 1503 DECL INLINE(uint32_t) iemNativeEmitLoadGpr32ByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,1504 1552 DECL_INLINE_THROW(uint32_t) 1553 iemNativeEmitLoadGpr32ByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprBase, int32_t offDisp) 1505 1554 { 1506 1555 #ifdef RT_ARCH_AMD64 1507 1556 /* mov reg32, mem32 */ 1508 1557 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8); 1509 AssertReturn(pbCodeBuf, UINT32_MAX);1510 1558 if (iGprDst >= 8 || iGprBase >= 8) 1511 1559 pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B); … … 1533 1581 * Emits a 64-bit GPR subtract with a signed immediate subtrahend. 1534 1582 */ 1535 DECLINLINE(uint32_t) iemNativeEmitSubGprImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t iSubtrahend) 1583 DECL_INLINE_THROW(uint32_t) 1584 iemNativeEmitSubGprImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t iSubtrahend) 1536 1585 { 1537 1586 /* sub gprdst, imm8/imm32 */ 1538 1587 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1539 AssertReturn(pbCodeBuf, UINT32_MAX);1540 1588 if (iGprDst < 8) 1541 1589 pbCodeBuf[off++] = X86_OP_REX_W; … … 1567 1615 * @note The AMD64 version sets flags. 1568 1616 */ 1569 DECLINLINE(uint32_t ) iemNativeEmitAddTwoGprs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend) 1617 DECL_INLINE_THROW(uint32_t) 1618 iemNativeEmitAddTwoGprs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend) 1570 1619 { 1571 1620 #if defined(RT_ARCH_AMD64) 1572 1621 /* add Gv,Ev */ 1573 1622 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 1574 AssertReturn(pbCodeBuf, UINT32_MAX);1575 1623 pbCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R) 1576 1624 | (iGprAddend < 8 ? 
0 : X86_OP_REX_B); … … 1580 1628 #elif defined(RT_ARCH_ARM64) 1581 1629 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1582 AssertReturn(pu32CodeBuf, UINT32_MAX);1583 1630 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iGprAddend); 1584 1631 … … 1594 1641 * Emits a 64-bit GPR additions with a 8-bit signed immediate. 1595 1642 */ 1596 DECLINLINE(uint32_t ) iemNativeEmitAddGprImm8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int8_t iImm8) 1643 DECL_INLINE_THROW(uint32_t) 1644 iemNativeEmitAddGprImm8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int8_t iImm8) 1597 1645 { 1598 1646 #if defined(RT_ARCH_AMD64) 1647 /* add or inc */ 1599 1648 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4); 1600 AssertReturn(pbCodeBuf, UINT32_MAX);1601 /* add or inc */1602 1649 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B; 1603 1650 if (iImm8 != 1) … … 1615 1662 #elif defined(RT_ARCH_ARM64) 1616 1663 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1617 AssertReturn(pu32CodeBuf, UINT32_MAX);1618 1664 if (iImm8 >= 0) 1619 1665 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint8_t)iImm8); … … 1633 1679 * @note Bits 32 thru 63 in the GPR will be zero after the operation. 1634 1680 */ 1635 DECLINLINE(uint32_t ) iemNativeEmitAddGpr32Imm8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int8_t iImm8) 1681 DECL_INLINE_THROW(uint32_t) 1682 iemNativeEmitAddGpr32Imm8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int8_t iImm8) 1636 1683 { 1637 1684 #if defined(RT_ARCH_AMD64) 1685 /* add or inc */ 1638 1686 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4); 1639 AssertReturn(pbCodeBuf, UINT32_MAX);1640 /* add or inc */1641 1687 if (iGprDst >= 8) 1642 1688 pbCodeBuf[off++] = X86_OP_REX_B; … … 1655 1701 #elif defined(RT_ARCH_ARM64) 1656 1702 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1657 AssertReturn(pu32CodeBuf, UINT32_MAX);1658 1703 if (iImm8 >= 0) 1659 1704 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint8_t)iImm8, false /*f64Bit*/); … … 1672 1717 * Emits a 64-bit GPR additions with a 64-bit signed addend. 1673 1718 */ 1674 DECLINLINE(uint32_t ) iemNativeEmitAddGprImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int64_t iAddend) 1719 DECL_INLINE_THROW(uint32_t) 1720 iemNativeEmitAddGprImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int64_t iAddend) 1675 1721 { 1676 1722 #if defined(RT_ARCH_AMD64) … … 1682 1728 /* add grp, imm32 */ 1683 1729 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1684 AssertReturn(pbCodeBuf, UINT32_MAX);1685 1730 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B; 1686 1731 pbCodeBuf[off++] = 0x81; … … 1695 1740 /* Best to use a temporary register to deal with this in the simplest way: */ 1696 1741 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint64_t)iAddend); 1697 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);1698 1742 1699 1743 /* add dst, tmpreg */ 1700 1744 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 1701 AssertReturn(pbCodeBuf, UINT32_MAX);1702 1745 pbCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R) 1703 1746 | (iTmpReg < 8 ? 
0 : X86_OP_REX_B); … … 1712 1755 { 1713 1756 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1714 AssertReturn(pu32CodeBuf, UINT32_MAX);1715 1757 if (iAddend >= 0) 1716 1758 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint32_t)iAddend); … … 1722 1764 /* Use temporary register for the immediate. */ 1723 1765 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint64_t)iAddend); 1724 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);1725 1766 1726 1767 /* add gprdst, gprdst, tmpreg */ 1727 1768 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1728 AssertReturn(pu32CodeBuf, UINT32_MAX);1729 1769 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iTmpReg); 1730 1770 … … 1744 1784 * @note Bits 32 thru 63 in the GPR will be zero after the operation. 1745 1785 */ 1746 DECLINLINE(uint32_t ) iemNativeEmitAddGpr32Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t iAddend) 1786 DECL_INLINE_THROW(uint32_t) 1787 iemNativeEmitAddGpr32Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t iAddend) 1747 1788 { 1748 1789 #if defined(RT_ARCH_AMD64) … … 1752 1793 /* add grp, imm32 */ 1753 1794 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1754 AssertReturn(pbCodeBuf, UINT32_MAX);1755 1795 if (iGprDst >= 8) 1756 1796 pbCodeBuf[off++] = X86_OP_REX_B; … … 1766 1806 { 1767 1807 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1768 AssertReturn(pu32CodeBuf, UINT32_MAX);1769 1808 if (iAddend >= 0) 1770 1809 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint32_t)iAddend, false /*f64Bit*/); … … 1776 1815 /* Use temporary register for the immediate. */ 1777 1816 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint32_t)iAddend); 1778 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);1779 1817 1780 1818 /* add gprdst, gprdst, tmpreg */ 1781 1819 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1782 AssertReturn(pu32CodeBuf, UINT32_MAX);1783 1820 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iTmpReg, false /*f64Bit*/); 1784 1821 … … 1802 1839 * Emits code for clearing bits 16 thru 63 in the GPR. 1803 1840 */ 1804 DECLINLINE(uint32_t ) iemNativeEmitClear16UpGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst) 1841 DECL_INLINE_THROW(uint32_t) 1842 iemNativeEmitClear16UpGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst) 1805 1843 { 1806 1844 #if defined(RT_ARCH_AMD64) 1807 1845 /* movzx reg32, reg16 */ 1808 1846 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4); 1809 AssertReturn(pbCodeBuf, UINT32_MAX);1810 1847 if (iGprDst >= 8) 1811 1848 pbCodeBuf[off++] = X86_OP_REX_B | X86_OP_REX_R; … … 1816 1853 #elif defined(RT_ARCH_ARM64) 1817 1854 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1818 AssertReturn(pu32CodeBuf, UINT32_MAX);1819 1855 # if 1 1820 1856 pu32CodeBuf[off++] = Armv8A64MkInstrUxth(iGprDst, iGprDst); … … 1837 1873 * and ARM64 hosts. 
1838 1874 */ 1839 DECL INLINE(uint32_t ) iemNativeEmitAndGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc,1840 1875 DECL_INLINE_THROW(uint32_t) 1876 iemNativeEmitAndGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc, bool fSetFlags = false) 1841 1877 { 1842 1878 #if defined(RT_ARCH_AMD64) 1843 1879 /* and Gv, Ev */ 1844 1880 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 1845 AssertReturn(pbCodeBuf, UINT32_MAX);1846 1881 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B); 1847 1882 pbCodeBuf[off++] = 0x23; … … 1851 1886 #elif defined(RT_ARCH_ARM64) 1852 1887 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1853 AssertReturn(pu32CodeBuf, UINT32_MAX);1854 1888 if (!fSetFlags) 1855 1889 pu32CodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iGprSrc); … … 1868 1902 * Emits code for AND'ing two 32-bit GPRs. 1869 1903 */ 1870 DECLINLINE(uint32_t ) iemNativeEmitAndGpr32ByGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc) 1904 DECL_INLINE_THROW(uint32_t) 1905 iemNativeEmitAndGpr32ByGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc) 1871 1906 { 1872 1907 #if defined(RT_ARCH_AMD64) 1873 1908 /* and Gv, Ev */ 1874 1909 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 1875 AssertReturn(pbCodeBuf, UINT32_MAX);1876 1910 if (iGprDst >= 8 || iGprSrc >= 8) 1877 1911 pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B); … … 1881 1915 #elif defined(RT_ARCH_ARM64) 1882 1916 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1883 AssertReturn(pu32CodeBuf, UINT32_MAX);1884 1917 pu32CodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iGprSrc, false /*f64Bit*/); 1885 1918 … … 1898 1931 * and ARM64 hosts. 1899 1932 */ 1900 DECL INLINE(uint32_t ) iemNativeEmitAndGprByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint64_t uImm,1901 1933 DECL_INLINE_THROW(uint32_t) 1934 iemNativeEmitAndGprByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint64_t uImm, bool fSetFlags = false) 1902 1935 { 1903 1936 #if defined(RT_ARCH_AMD64) … … 1906 1939 /* and Ev, imm8 */ 1907 1940 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4); 1908 AssertReturn(pbCodeBuf, UINT32_MAX);1909 1941 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R); 1910 1942 pbCodeBuf[off++] = 0x83; … … 1916 1948 /* and Ev, imm32 */ 1917 1949 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 1918 AssertReturn(pbCodeBuf, UINT32_MAX);1919 1950 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R); 1920 1951 pbCodeBuf[off++] = 0x81; … … 1929 1960 /* Use temporary register for the 64-bit immediate. */ 1930 1961 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm); 1931 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);1932 1962 off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg); 1933 1963 iemNativeRegFreeTmpImm(pReNative, iTmpReg); … … 1941 1971 { 1942 1972 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 1943 AssertReturn(pu32CodeBuf, UINT32_MAX);1944 1973 if (!fSetFlags) 1945 1974 pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR); … … 1951 1980 /* Use temporary register for the 64-bit immediate. 
1898 1931 * and ARM64 hosts.
1899 1932 */
1900 DECLINLINE(uint32_t) iemNativeEmitAndGprByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint64_t uImm,
1901 bool fSetFlags = false)
1933 DECL_INLINE_THROW(uint32_t)
1934 iemNativeEmitAndGprByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint64_t uImm, bool fSetFlags = false)
1902 1935 {
1903 1936 #if defined(RT_ARCH_AMD64)
… …
1906 1939 /* and Ev, imm8 */
1907 1940 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
1908 AssertReturn(pbCodeBuf, UINT32_MAX);
1909 1941 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_B);
1910 1942 pbCodeBuf[off++] = 0x83;
… …
1916 1948 /* and Ev, imm32 */
1917 1949 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1918 AssertReturn(pbCodeBuf, UINT32_MAX);
1919 1950 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_B);
1920 1951 pbCodeBuf[off++] = 0x81;
… …
1929 1960 /* Use temporary register for the 64-bit immediate. */
1930 1961 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
1931 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
1932 1962 off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg);
1933 1963 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
… …
1941 1971 {
1942 1972 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1943 AssertReturn(pu32CodeBuf, UINT32_MAX);
1944 1973 if (!fSetFlags)
1945 1974 pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR);
… …
1951 1980 /* Use temporary register for the 64-bit immediate. */
1952 1981 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
1953 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
1954 1982 off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg, fSetFlags);
1955 1983 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
… …
1967 1995 * Emits code for AND'ing a 32-bit GPR with a constant.
1968 1996 */
1969 DECLINLINE(uint32_t) iemNativeEmitAndGpr32ByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t uImm,
1970 bool fSetFlags = false)
1997 DECL_INLINE_THROW(uint32_t)
1998 iemNativeEmitAndGpr32ByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t uImm, bool fSetFlags = false)
1971 1999 {
1972 2000 #if defined(RT_ARCH_AMD64)
1973 2001 /* and Ev, imm */
1974 2002 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1975 AssertReturn(pbCodeBuf, UINT32_MAX);
1976 2003 if (iGprDst >= 8)
1977 2004 pbCodeBuf[off++] = X86_OP_REX_B;
… …
1999 2026 {
2000 2027 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2001 AssertReturn(pu32CodeBuf, UINT32_MAX);
2002 2028 if (!fSetFlags)
2003 2029 pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
… …
2009 2035 /* Use temporary register for the immediate. */
2010 2036 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
2011 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2012 2037 if (!fSetFlags)
2013 2038 off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, iGprDst, iTmpReg);
… …
2028 2053 * Emits code for XOR'ing two 64-bit GPRs.
2029 2054 */
2030 DECLINLINE(uint32_t) iemNativeEmitXorGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
2055 DECL_INLINE_THROW(uint32_t)
2056 iemNativeEmitXorGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
2031 2057 {
2032 2058 #if defined(RT_ARCH_AMD64)
2033 2059 /* xor Gv, Ev */
2034 2060 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
2035 AssertReturn(pbCodeBuf, UINT32_MAX);
2036 2061 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
2037 2062 pbCodeBuf[off++] = 0x33;
… …
2040 2065 #elif defined(RT_ARCH_ARM64)
2041 2066 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2042 AssertReturn(pu32CodeBuf, UINT32_MAX);
2043 2067 pu32CodeBuf[off++] = Armv8A64MkInstrEor(iGprDst, iGprDst, iGprSrc);
… …
2054 2078 * Emits code for XOR'ing two 32-bit GPRs.
2055 2079 */
2056 DECLINLINE(uint32_t) iemNativeEmitXorGpr32ByGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
2080 DECL_INLINE_THROW(uint32_t)
2081 iemNativeEmitXorGpr32ByGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
2057 2082 {
2058 2083 #if defined(RT_ARCH_AMD64)
2059 2084 /* xor Gv, Ev */
2060 2085 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
2061 AssertReturn(pbCodeBuf, UINT32_MAX);
2062 2086 if (iGprDst >= 8 || iGprSrc >= 8)
2063 2087 pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
… …
2067 2091 #elif defined(RT_ARCH_ARM64)
2068 2092 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2069 AssertReturn(pu32CodeBuf, UINT32_MAX);
2070 2093 pu32CodeBuf[off++] = Armv8A64MkInstrEor(iGprDst, iGprDst, iGprSrc, false /*f64Bit*/);
… …
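XOR'ing a register with itself is the canonical zero idiom on both hosts (xor reg,reg on AMD64, eor on ARM64), so these emitters double as a "load zero" helper. A one-line sketch, with iGprDst standing for whatever host register the caller owns:

    off = iemNativeEmitXorGprByGpr(pReNative, off, iGprDst, iGprDst); /* iGprDst := 0 */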
2085 2108 * Emits code for shifting a GPR a fixed number of bits to the left.
2086 2109 */
2087 DECLINLINE(uint32_t) iemNativeEmitShiftGprLeft(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2110 DECL_INLINE_THROW(uint32_t)
2111 iemNativeEmitShiftGprLeft(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2088 2112 {
2089 2113 Assert(cShift > 0 && cShift < 64);
… …
2092 2116 /* shl dst, cShift */
2093 2117 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
2094 AssertReturn(pbCodeBuf, UINT32_MAX);
2095 2118 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
2096 2119 if (cShift != 1)
… …
2108 2131 #elif defined(RT_ARCH_ARM64)
2109 2132 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2110 AssertReturn(pu32CodeBuf, UINT32_MAX);
2111 2133 pu32CodeBuf[off++] = Armv8A64MkInstrLslImm(iGprDst, iGprDst, cShift);
… …
2122 2144 * Emits code for shifting a 32-bit GPR a fixed number of bits to the left.
2123 2145 */
2124 DECLINLINE(uint32_t) iemNativeEmitShiftGpr32Left(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2146 DECL_INLINE_THROW(uint32_t)
2147 iemNativeEmitShiftGpr32Left(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2125 2148 {
2126 2149 Assert(cShift > 0 && cShift < 32);
… …
2129 2152 /* shl dst, cShift */
2130 2153 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
2131 AssertReturn(pbCodeBuf, UINT32_MAX);
2132 2154 if (iGprDst >= 8)
2133 2155 pbCodeBuf[off++] = X86_OP_REX_B;
… …
2146 2168 #elif defined(RT_ARCH_ARM64)
2147 2169 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2148 AssertReturn(pu32CodeBuf, UINT32_MAX);
2149 2170 pu32CodeBuf[off++] = Armv8A64MkInstrLslImm(iGprDst, iGprDst, cShift, false /*64Bit*/);
… …
2160 2181 * Emits code for (unsigned) shifting a GPR a fixed number of bits to the right.
2161 2182 */
2162 DECLINLINE(uint32_t) iemNativeEmitShiftGprRight(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2183 DECL_INLINE_THROW(uint32_t)
2184 iemNativeEmitShiftGprRight(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2163 2185 {
2164 2186 Assert(cShift > 0 && cShift < 64);
… …
2167 2189 /* shr dst, cShift */
2168 2190 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
2169 AssertReturn(pbCodeBuf, UINT32_MAX);
2170 2191 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
2171 2192 if (cShift != 1)
… …
2183 2204 #elif defined(RT_ARCH_ARM64)
2184 2205 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2185 AssertReturn(pu32CodeBuf, UINT32_MAX);
2186 2206 pu32CodeBuf[off++] = Armv8A64MkInstrLsrImm(iGprDst, iGprDst, cShift);
… …
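The fixed-count shift emitters combine naturally into field extraction. A hedged sketch using only the functions above; the field choice is illustrative:

    /* Isolate bits 8..15 of the 32-bit value in iGprDst, zero-extended:
     * shl 16 moves bits 8..15 up to 24..31 (discarding everything higher),
     * shr 24 brings them down to 0..7 with all upper bits cleared. */
    off = iemNativeEmitShiftGpr32Left(pReNative, off, iGprDst, 16);
    off = iemNativeEmitShiftGpr32Right(pReNative, off, iGprDst, 24);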
2198 2218 * right.
2199 2219 */
2200 DECLINLINE(uint32_t) iemNativeEmitShiftGpr32Right(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2220 DECL_INLINE_THROW(uint32_t)
2221 iemNativeEmitShiftGpr32Right(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2201 2222 {
2202 2223 Assert(cShift > 0 && cShift < 32);
… …
2205 2226 /* shr dst, cShift */
2206 2227 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
2207 AssertReturn(pbCodeBuf, UINT32_MAX);
2208 2228 if (iGprDst >= 8)
2209 2229 pbCodeBuf[off++] = X86_OP_REX_B;
… …
2222 2242 #elif defined(RT_ARCH_ARM64)
2223 2243 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2224 AssertReturn(pu32CodeBuf, UINT32_MAX);
2225 2244 pu32CodeBuf[off++] = Armv8A64MkInstrLsrImm(iGprDst, iGprDst, cShift, false /*64Bit*/);
… …
2243 2262 * Emits an ARM64 compare instruction.
2244 2263 */
2245 DECLINLINE(uint32_t) iemNativeEmitCmpArm64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight,
2246 bool f64Bit = true, uint32_t cShift = 0,
2247 ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsr)
2264 DECL_INLINE_THROW(uint32_t)
2265 iemNativeEmitCmpArm64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight,
2266 bool f64Bit = true, uint32_t cShift = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsr)
2248 2267 {
2249 2268 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2250 AssertReturn(pu32CodeBuf, UINT32_MAX);
2251 2269 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, ARMV8_A64_REG_XZR /*iRegResult*/, iGprLeft, iGprRight,
2252 2270 f64Bit, true /*fSetFlags*/, cShift, enmShift);
… …
2261 2279 * with conditional instruction.
2262 2280 */
2263 DECLINLINE(uint32_t) iemNativeEmitCmpGprWithGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight)
2281 DECL_INLINE_THROW(uint32_t)
2282 iemNativeEmitCmpGprWithGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight)
2264 2283 {
2265 2284 #ifdef RT_ARCH_AMD64
2266 2285 /* cmp Gv, Ev */
2267 2286 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
2268 AssertReturn(pbCodeBuf, UINT32_MAX);
2269 2287 pbCodeBuf[off++] = X86_OP_REX_W | (iGprLeft >= 8 ? X86_OP_REX_R : 0) | (iGprRight >= 8 ? X86_OP_REX_B : 0);
2270 2288 pbCodeBuf[off++] = 0x3b;
… …
2286 2304 * with conditional instruction.
2287 2305 */
2288 DECLINLINE(uint32_t) iemNativeEmitCmpGpr32WithGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2289 uint8_t iGprLeft, uint8_t iGprRight)
2306 DECL_INLINE_THROW(uint32_t)
2307 iemNativeEmitCmpGpr32WithGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight)
2290 2308 {
2291 2309 #ifdef RT_ARCH_AMD64
2292 2310 /* cmp Gv, Ev */
2293 2311 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
2294 AssertReturn(pbCodeBuf, UINT32_MAX);
2295 2312 if (iGprLeft >= 8 || iGprRight >= 8)
2296 2313 pbCodeBuf[off++] = (iGprLeft >= 8 ? X86_OP_REX_R : 0) | (iGprRight >= 8 ? X86_OP_REX_B : 0);
… …
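Worked encodings for the compare emitters, with illustrative register values: cmp Gv,Ev (0x3b) puts the left operand in ModRM.reg and the right one in ModRM.r/m, while iemNativeEmitCmpArm64 is literally a flag-setting subtract into the zero register:

    /* iemNativeEmitCmpGprWithGpr with iGprLeft = rax (0), iGprRight = r8 (8):
     *   REX = X86_OP_REX_W | X86_OP_REX_B = 0x49, opcode 0x3b, ModRM 0xc0
     * -> "49 3b c0" = cmp rax, r8.
     * The ARM64 path emits "subs xzr, x0, x8", which is the canonical
     * encoding of "cmp x0, x8". */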
2313 2330 * flags/whatever for use with conditional instruction.
2314 2331 */
2315 DECLINLINE(uint32_t) iemNativeEmitCmpGprWithImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint64_t uImm)
2332 DECL_INLINE_THROW(uint32_t)
2333 iemNativeEmitCmpGprWithImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint64_t uImm)
2316 2334 {
2317 2335 #ifdef RT_ARCH_AMD64
… …
2320 2338 /* cmp Ev, Ib */
2321 2339 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
2322 AssertReturn(pbCodeBuf, UINT32_MAX);
2323 2340 pbCodeBuf[off++] = X86_OP_REX_W | (iGprLeft >= 8 ? X86_OP_REX_B : 0);
2324 2341 pbCodeBuf[off++] = 0x83;
… …
2330 2347 /* cmp Ev, imm */
2331 2348 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
2332 AssertReturn(pbCodeBuf, UINT32_MAX);
2333 2349 pbCodeBuf[off++] = X86_OP_REX_W | (iGprLeft >= 8 ? X86_OP_REX_B : 0);
2334 2350 pbCodeBuf[off++] = 0x81;
… …
2343 2359 {
2344 2360 /* Use temporary register for the immediate. */
2345 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
2346 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2347 2361 uint8_t const iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
2348 2362 off = iemNativeEmitCmpGprWithGpr(pReNative, off, iGprLeft, iTmpReg);
2349
2350 2363 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
2351 2364 }
… …
2356 2369 {
2357 2370 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2358 AssertReturn(pu32CodeBuf, UINT32_MAX);
2359 2371 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
2360 2372 true /*64Bit*/, true /*fSetFlags*/);
… …
2363 2375 {
2364 2376 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2365 AssertReturn(pu32CodeBuf, UINT32_MAX);
2366 2377 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
2367 2378 true /*64Bit*/, true /*fSetFlags*/, true /*fShift12*/);
… …
2371 2382 /* Use temporary register for the immediate. */
2372 2383 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
2373 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2374
2375 2384 off = iemNativeEmitCmpGprWithGpr(pReNative, off, iGprLeft, iTmpReg);
2376
2377 2385 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
2378 2386 }
… …
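The three ARM64 branches mirror how AArch64 encodes arithmetic immediates: a 12-bit unsigned field, optionally shifted left by 12. So 0..4095 and 4096-aligned values up to 0xfff000 encode directly; everything else takes the temporary-register path. Illustrative values for the 64-bit compare above:

    /*   uImm = 0xfff     -> subs xzr, xN, #4095       (plain uimm12 branch)
     *   uImm = 0x1000    -> subs xzr, xN, #0x1000     (fShift12 branch, LSL #12)
     *   uImm = 0x123456  -> not encodable: load via iemNativeRegAllocTmpImm
     *                       and fall back to the register-register compare. */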
2391 2399 * flags/whatever for use with conditional instruction.
2392 2400 */
2393 DECLINLINE(uint32_t) iemNativeEmitCmpGpr32WithImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint32_t uImm)
2401 DECL_INLINE_THROW(uint32_t)
2402 iemNativeEmitCmpGpr32WithImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint32_t uImm)
2394 2403 {
2395 2404 #ifdef RT_ARCH_AMD64
2396 2405 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
2397 AssertReturn(pbCodeBuf, UINT32_MAX);
2398 2406 if (iGprLeft >= 8)
2399 2407 pbCodeBuf[off++] = X86_OP_REX_B;
… …
2422 2430 {
2423 2431 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2424 AssertReturn(pu32CodeBuf, UINT32_MAX);
2425 2432 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
2426 2433 false /*64Bit*/, true /*fSetFlags*/);
… …
2429 2436 {
2430 2437 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2431 AssertReturn(pu32CodeBuf, UINT32_MAX);
2432 2438 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
2433 2439 false /*64Bit*/, true /*fSetFlags*/, true /*fShift12*/);
… …
2437 2443 /* Use temporary register for the immediate. */
2438 2444 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
2439 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2440
2441 2445 off = iemNativeEmitCmpGpr32WithGpr(pReNative, off, iGprLeft, iTmpReg);
2442
2443 2446 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
2444 2447 }
… …
2461 2464 * Emits a JMP rel32 / B imm19 to the given label.
2462 2465 */
2463 DECLINLINE(uint32_t) iemNativeEmitJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2466 DECL_INLINE_THROW(uint32_t)
2467 iemNativeEmitJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2464 2468 {
2465 2469 Assert(idxLabel < pReNative->cLabels);
… …
2467 2471 #ifdef RT_ARCH_AMD64
2468 2472 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
2469 AssertReturn(pbCodeBuf, UINT32_MAX);
2470 2473 if (pReNative->paLabels[idxLabel].off != UINT32_MAX)
2471 2474 {
… …
2489 2492 {
2490 2493 pbCodeBuf[off++] = 0xe9; /* jmp rel32 */
2491 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4), UINT32_MAX);
2494 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4);
2492 2495 pbCodeBuf[off++] = 0xfe;
2493 2496 pbCodeBuf[off++] = 0xff;
… …
2499 2502 #elif defined(RT_ARCH_ARM64)
2500 2503 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2501 AssertReturn(pu32CodeBuf, UINT32_MAX);
2502 2504 if (pReNative->paLabels[idxLabel].off != UINT32_MAX)
2503 2505 pu32CodeBuf[off++] = Armv8A64MkInstrB(pReNative->paLabels[idxLabel].off - off);
2504 2506 else
2505 2507 {
2506 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5), UINT32_MAX);
2508 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5);
2507 2509 pu32CodeBuf[off++] = Armv8A64MkInstrB(-1);
2508 2510 }
… …
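Jumps to a not-yet-defined label emit a placeholder branch and record a fixup (kIemNativeFixupType_Rel32 with a -4 addend on AMD64, RelImm19At5 on ARM64) to be patched once the label gets an offset. A hedged usage sketch; iemNativeLabelDefine and the label type are assumed from the API shape and are not part of this hunk:

    uint32_t const idxLabel = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Return /*assumed*/,
                                                   UINT32_MAX /*offWhere*/, 0 /*uData*/);
    off = iemNativeEmitJmpToLabel(pReNative, off, idxLabel); /* undefined: placeholder + fixup */
    /* ... emit more code ... */
    iemNativeLabelDefine(pReNative, idxLabel, off);          /* assumed helper: binds the label */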
2519 2521 * Emits a JMP rel32 / B imm19 to a new undefined label.
2520 2522 */
2521 DECLINLINE(uint32_t) iemNativeEmitJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2522 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2523 DECL_INLINE_THROW(uint32_t)
2524 iemNativeEmitJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2523 2525 {
2524 2526 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
2525 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
2526 2527 return iemNativeEmitJmpToLabel(pReNative, off, idxLabel);
2527 2528 }
… …
2558 2559 * Emits a Jcc rel32 / B.cc imm19 to the given label (ASSUMED requiring fixup).
2559 2560 */
2560 DECLINLINE(uint32_t) iemNativeEmitJccToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2561 uint32_t idxLabel, IEMNATIVEINSTRCOND enmCond)
2561 DECL_INLINE_THROW(uint32_t)
2562 iemNativeEmitJccToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel, IEMNATIVEINSTRCOND enmCond)
2562 2563 {
2563 2564 Assert(idxLabel < pReNative->cLabels);
… …
2566 2567 /* jcc rel32 */
2567 2568 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
2568 AssertReturn(pbCodeBuf, UINT32_MAX);
2569 2569 pbCodeBuf[off++] = 0x0f;
2570 2570 pbCodeBuf[off++] = (uint8_t)enmCond | 0x80;
2571 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4), UINT32_MAX);
2571 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4);
2572 2572 pbCodeBuf[off++] = 0x00;
2573 2573 pbCodeBuf[off++] = 0x00;
… …
2577 2577 #elif defined(RT_ARCH_ARM64)
2578 2578 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2579 AssertReturn(pu32CodeBuf, UINT32_MAX);
2580 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5), UINT32_MAX);
2579 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5);
2581 2580 pu32CodeBuf[off++] = Armv8A64MkInstrBCond(enmCond, -1);
… …
2592 2591 * Emits a Jcc rel32 / B.cc imm19 to a new label.
2593 2592 */
2594 DECLINLINE(uint32_t) iemNativeEmitJccToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2595 IEMNATIVELABELTYPE enmLabelType, uint16_t uData, IEMNATIVEINSTRCOND enmCond)
2593 DECL_INLINE_THROW(uint32_t)
2594 iemNativeEmitJccToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2595 IEMNATIVELABELTYPE enmLabelType, uint16_t uData, IEMNATIVEINSTRCOND enmCond)
2596 2596 {
2597 2597 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
2598 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
2599 2598 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, enmCond);
2600 2599 }
… …
2604 2603 * Emits a JZ/JE rel32 / B.EQ imm19 to the given label.
2605 2604 */
2606 DECLINLINE(uint32_t) iemNativeEmitJzToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2605 DECL_INLINE_THROW(uint32_t) iemNativeEmitJzToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2607 2606 {
2608 2607 #ifdef RT_ARCH_AMD64
… …
2618 2617 * Emits a JZ/JE rel32 / B.EQ imm19 to a new label.
2619 2618 */
2620 DECLINLINE(uint32_t) iemNativeEmitJzToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2621 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2619 DECL_INLINE_THROW(uint32_t) iemNativeEmitJzToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2620 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2622 2621 {
2623 2622 #ifdef RT_ARCH_AMD64
… …
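The `(uint8_t)enmCond | 0x80` line shows what IEMNATIVEINSTRCOND (declared in an elided hunk) evidently holds: each host's native condition encoding. On AMD64 that is the Jcc condition nibble, which OR'ed with 0x80 forms the second byte of the two-byte 0F 8x near-Jcc opcode; on ARM64 the value goes straight into the cond field of B.cond. Illustrative for the equal/zero condition:

    /*   AMD64: nibble 0x4 -> 0f 84 xx xx xx xx   (jz/je rel32, rel32 patched by the fixup)
     *   ARM64: EQ = 0b0000 -> b.eq <imm19>       (imm19 patched via RelImm19At5) */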
2634 2633 * Emits a JNZ/JNE rel32 / B.NE imm19 to the given label.
2635 2634 */
2636 DECLINLINE(uint32_t) iemNativeEmitJnzToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2635 DECL_INLINE_THROW(uint32_t) iemNativeEmitJnzToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2637 2636 {
2638 2637 #ifdef RT_ARCH_AMD64
… …
2648 2647 * Emits a JNZ/JNE rel32 / B.NE imm19 to a new label.
2649 2648 */
2650 DECLINLINE(uint32_t) iemNativeEmitJnzToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2651 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2649 DECL_INLINE_THROW(uint32_t) iemNativeEmitJnzToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2650 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2652 2651 {
2653 2652 #ifdef RT_ARCH_AMD64
… …
2664 2663 * Emits a JBE/JNA rel32 / B.LS imm19 to the given label.
2665 2664 */
2666 DECLINLINE(uint32_t) iemNativeEmitJbeToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2665 DECL_INLINE_THROW(uint32_t) iemNativeEmitJbeToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2667 2666 {
2668 2667 #ifdef RT_ARCH_AMD64
… …
2678 2677 * Emits a JBE/JNA rel32 / B.LS imm19 to a new label.
2679 2678 */
2680 DECLINLINE(uint32_t) iemNativeEmitJbeToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2681 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2679 DECL_INLINE_THROW(uint32_t) iemNativeEmitJbeToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2680 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2682 2681 {
2683 2682 #ifdef RT_ARCH_AMD64
… …
2694 2693 * Emits a JA/JNBE rel32 / B.HI imm19 to the given label.
2695 2694 */
2696 DECLINLINE(uint32_t) iemNativeEmitJaToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2695 DECL_INLINE_THROW(uint32_t) iemNativeEmitJaToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2697 2696 {
2698 2697 #ifdef RT_ARCH_AMD64
… …
2708 2707 * Emits a JA/JNBE rel32 / B.HI imm19 to a new label.
2709 2708 */
2710 DECLINLINE(uint32_t) iemNativeEmitJaToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2711 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2709 DECL_INLINE_THROW(uint32_t) iemNativeEmitJaToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2710 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2712 2711 {
2713 2712 #ifdef RT_ARCH_AMD64
… …
2725 2724 * How @a offTarget is applied is target specific.
2726 2725 */
2727 DECLINLINE(uint32_t) iemNativeEmitJccToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2728 int32_t offTarget, IEMNATIVEINSTRCOND enmCond)
2726 DECL_INLINE_THROW(uint32_t)
2727 iemNativeEmitJccToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget, IEMNATIVEINSTRCOND enmCond)
2729 2728 {
2730 2729 #ifdef RT_ARCH_AMD64
2731 2730 /* jcc rel32 */
2732 2731 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
2733 AssertReturn(pbCodeBuf, UINT32_MAX);
2734 2732 if (offTarget < 128 && offTarget >= -128)
2735 2733 {
… …
2749 2747 #elif defined(RT_ARCH_ARM64)
2750 2748 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2751 AssertReturn(pu32CodeBuf, UINT32_MAX);
2752 2749 pu32CodeBuf[off++] = Armv8A64MkInstrBCond(enmCond, offTarget);
… …
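For fixed displacements the AMD64 path picks the two-byte short form (7x rel8) when offTarget fits in -128..127 and the six-byte 0F 8x rel32 form otherwise, while ARM64 always emits a single B.cond. That is also what "target specific" means in these doc comments: offTarget counts bytes on AMD64 but 32-bit instruction units on ARM64. A hedged sketch; the displacement and the condition constant are illustrative:

    /* Conditionally skip a small block of already-measured code. */
    off = iemNativeEmitJccToFixed(pReNative, off,
                                  offSkip /*bytes on AMD64, instructions on ARM64*/,
                                  kIemNativeInstrCond_ne /*illustrative*/);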
2764 2761 * How @a offTarget is applied is target specific.
2765 2762 */
2766 DECLINLINE(uint32_t) iemNativeEmitJzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2763 DECL_INLINE_THROW(uint32_t) iemNativeEmitJzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2767 2764 {
2768 2765 #ifdef RT_ARCH_AMD64
… …
2780 2777 * How @a offTarget is applied is target specific.
2781 2778 */
2782 DECLINLINE(uint32_t) iemNativeEmitJnzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2779 DECL_INLINE_THROW(uint32_t) iemNativeEmitJnzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2783 2780 {
2784 2781 #ifdef RT_ARCH_AMD64
… …
2796 2793 * How @a offTarget is applied is target specific.
2797 2794 */
2798 DECLINLINE(uint32_t) iemNativeEmitJbeToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2795 DECL_INLINE_THROW(uint32_t) iemNativeEmitJbeToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2799 2796 {
2800 2797 #ifdef RT_ARCH_AMD64
… …
2812 2809 * How @a offTarget is applied is target specific.
2813 2810 */
2814 DECLINLINE(uint32_t) iemNativeEmitJaToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2811 DECL_INLINE_THROW(uint32_t) iemNativeEmitJaToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2815 2812 {
2816 2813 #ifdef RT_ARCH_AMD64
… …
2861 2858 * Internal helper, don't call directly.
2862 2859 */
2863 DECLINLINE(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfCc(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2864 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel,
2865 bool fJmpIfSet)
2860 DECL_INLINE_THROW(uint32_t)
2861 iemNativeEmitTestBitInGprAndJmpToLabelIfCc(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc,
2862 uint8_t iBitNo, uint32_t idxLabel, bool fJmpIfSet)
2866 2863 {
2867 2864 Assert(iBitNo < 64);
2868 2865 #ifdef RT_ARCH_AMD64
2869 2866 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
2870 AssertReturn(pbCodeBuf, UINT32_MAX);
2871 2867 if (iBitNo < 8)
2872 2868 {
… …
2896 2892 /* Use the TBNZ instruction here. */
2897 2893 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2898 AssertReturn(pu32CodeBuf, UINT32_MAX);
2899 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm14At5), UINT32_MAX);
2894 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm14At5);
2900 2895 pu32CodeBuf[off++] = Armv8A64MkInstrTbzTbnz(fJmpIfSet, 0, iGprSrc, iBitNo);
… …
2914 2909 * @note On ARM64 the range is only +/-8191 instructions.
2915 2910 */
2916 DECLINLINE(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2917 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel)
2911 DECL_INLINE_THROW(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2912 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel)
2918 2913 {
2919 2914 return iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, iGprSrc, iBitNo, idxLabel, true /*fJmpIfSet*/);
… …
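The ±8191-instruction note follows directly from the encoding: TBZ/TBNZ carry a 14-bit signed instruction offset (imm14), roughly ±2^13 instructions of reach, which is why these emitters use the RelImm14At5 fixup type instead of the 19-bit one used by B.cond. Example use, testing a sign bit; the register and label are illustrative:

    /* ARM64: a single "tbnz xN, #63, <label>"; AMD64: test/bt + jcc instead. */
    off = iemNativeEmitTestBitInGprAndJmpToLabelIfSet(pReNative, off, iGprSrc, 63 /*iBitNo*/, idxLabel);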
2927 2922 * @note On ARM64 the range is only +/-8191 instructions.
2928 2923 */
2929 DECLINLINE(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2930 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel)
2924 DECL_INLINE_THROW(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2925 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel)
2931 2926 {
2932 2927 return iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, iGprSrc, iBitNo, idxLabel, false /*fJmpIfSet*/);
… …
2938 2933 * flags accordingly.
2939 2934 */
2940 DECLINLINE(uint32_t) iemNativeEmitTestAnyBitsInGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, uint64_t fBits)
2935 DECL_INLINE_THROW(uint32_t)
2936 iemNativeEmitTestAnyBitsInGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, uint64_t fBits)
2941 2937 {
2942 2938 Assert(fBits != 0);
… …
2946 2942 {
2947 2943 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBits);
2948 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2949 2944
2950 2945 /* test Ev,Gv */
2951 2946 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
2952 AssertReturn(pbCodeBuf, UINT32_MAX);
2953 2947 pbCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_R) | (iTmpReg < 8 ? 0 : X86_OP_REX_B);
2954 2948 pbCodeBuf[off++] = 0x85;
… …
2961 2955 /* test Eb, imm8 or test Ev, imm32 */
2962 2956 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
2963 AssertReturn(pbCodeBuf, UINT32_MAX);
2964 2957 if (fBits <= UINT8_MAX)
2965 2958 {
… …
2984 2977 /** @todo implement me. */
2985 2978 else
2986 AssertFailedReturn(UINT32_MAX);
2979 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_EMIT_CASE_NOT_IMPLEMENTED_1));
2987 2980
2988 2981 #elif defined(RT_ARCH_ARM64)
… …
2994 2987 else
2995 2988 {
2996 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBits);
2997 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2998
2999 2989 /* ands Zr, iGprSrc, iTmpReg */
2990 uint8_t const iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBits);
3000 2991 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
3001 AssertReturn(pu32CodeBuf, UINT32_MAX);
3002 2992 pu32CodeBuf[off++] = Armv8A64MkInstrAnds(ARMV8_A64_REG_XZR, iGprSrc, iTmpReg);
3003
3004 2993 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
… …
3017 3006 * are set in @a iGprSrc.
3018 3007 */
3019 DECLINLINE(uint32_t) iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3020 uint8_t iGprSrc, uint64_t fBits, uint32_t idxLabel)
3008 DECL_INLINE_THROW(uint32_t)
3009 iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3010 uint8_t iGprSrc, uint64_t fBits, uint32_t idxLabel)
3021 3011 {
3022 3012 Assert(fBits); Assert(!RT_IS_POWER_OF_TWO(fBits));
… …
3033 3023 * are set in @a iGprSrc.
3034 3024 */
3035 DECLINLINE(uint32_t) iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfNoneSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3036 uint8_t iGprSrc, uint64_t fBits, uint32_t idxLabel)
3025 DECL_INLINE_THROW(uint32_t)
3026 iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfNoneSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3027 uint8_t iGprSrc, uint64_t fBits, uint32_t idxLabel)
3037 3028 {
3038 3029 Assert(fBits); Assert(!RT_IS_POWER_OF_TWO(fBits));
… …
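The paired Assert(!RT_IS_POWER_OF_TWO(fBits)) checks encode a division of labour: a single-bit mask should go through the bit-test emitters above (one TBZ/TBNZ on ARM64), while genuine multi-bit masks use test/ands plus a conditional branch. A sketch with an illustrative mask and label:

    /* Take a slow path if any of several status bits are set. */
    off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(pReNative, off, iGprSrc,
                                                             fStatusMask /*multi-bit, illustrative*/,
                                                             idxLabelSlow /*illustrative*/);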
3050 3041 * The operand size is given by @a f64Bit.
3051 3042 */
3052 DECLINLINE(uint32_t) iemNativeEmitTestIfGprIsZeroAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3053 uint8_t iGprSrc, bool f64Bit, uint32_t idxLabel)
3043 DECL_INLINE_THROW(uint32_t) iemNativeEmitTestIfGprIsZeroAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3044 uint8_t iGprSrc, bool f64Bit, uint32_t idxLabel)
3054 3045 {
3055 3046 Assert(idxLabel < pReNative->cLabels);
… …
3058 3049 /* test reg32,reg32 / test reg64,reg64 */
3059 3050 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
3060 AssertReturn(pbCodeBuf, UINT32_MAX);
3061 3051 if (f64Bit)
3062 3052 pbCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_R | X86_OP_REX_B);
… …
3072 3062 #elif defined(RT_ARCH_ARM64)
3073 3063 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
3074 AssertReturn(pu32CodeBuf, UINT32_MAX);
3075 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5), UINT32_MAX);
3064 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5);
3076 3065 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 0, iGprSrc, f64Bit);
3077 3066 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
… …
3089 3078 * The operand size is given by @a f64Bit.
3090 3079 */
3091 DECLINLINE(uint32_t) iemNativeEmitTestIfGprIsZeroAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc,
3092 bool f64Bit, IEMNATIVELABELTYPE enmLabelType,
3093 uint16_t uData = 0)
3080 DECL_INLINE_THROW(uint32_t)
3081 iemNativeEmitTestIfGprIsZeroAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, bool f64Bit,
3082 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
3094 3083 {
3095 3084 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
3096 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
3097 3085 return iemNativeEmitTestIfGprIsZeroAndJmpToLabel(pReNative, off, iGprSrc, f64Bit, idxLabel);
3098 3086 }
… …
3103 3091 * differ.
3104 3092 */
3105 DECLINLINE(uint32_t) iemNativeEmitTestIfGprNotEqualGprAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3106 uint8_t iGprLeft, uint8_t iGprRight, uint32_t idxLabel)
3093 DECL_INLINE_THROW(uint32_t)
3094 iemNativeEmitTestIfGprNotEqualGprAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3095 uint8_t iGprLeft, uint8_t iGprRight, uint32_t idxLabel)
3107 3096 {
3108 3097 off = iemNativeEmitCmpGprWithGpr(pReNative, off, iGprLeft, iGprRight);
… …
3115 3104 * Emits code that jumps to a new label if @a iGprLeft and @a iGprRight differ.
3116 3105 */
3117 DECLINLINE(uint32_t) iemNativeEmitTestIfGprNotEqualGprAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3118 uint8_t iGprLeft, uint8_t iGprRight,
3119 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
3106 DECL_INLINE_THROW(uint32_t)
3107 iemNativeEmitTestIfGprNotEqualGprAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3108 uint8_t iGprLeft, uint8_t iGprRight,
3109 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
3120 3110 {
3121 3111 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
3122 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
3123 3112 return iemNativeEmitTestIfGprNotEqualGprAndJmpToLabel(pReNative, off, iGprLeft, iGprRight, idxLabel);
3124 3113 }
… …
3128 3117 * Emits code that jumps to the given label if @a iGprSrc differs from @a uImm.
3129 3118 */
3130 DECLINLINE(uint32_t) iemNativeEmitTestIfGprNotEqualImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3131 uint8_t iGprSrc, uint64_t uImm, uint32_t idxLabel)
3119 DECL_INLINE_THROW(uint32_t)
3120 iemNativeEmitTestIfGprNotEqualImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3121 uint8_t iGprSrc, uint64_t uImm, uint32_t idxLabel)
3132 3122 {
3133 3123 off = iemNativeEmitCmpGprWithImm(pReNative, off, iGprSrc, uImm);
… …
3140 3130 * Emits code that jumps to a new label if @a iGprSrc differs from @a uImm.
3141 3131 */
3142 DECLINLINE(uint32_t) iemNativeEmitTestIfGprNotEqualImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3143 uint8_t iGprSrc, uint64_t uImm,
3144 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
3132 DECL_INLINE_THROW(uint32_t)
3133 iemNativeEmitTestIfGprNotEqualImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3134 uint8_t iGprSrc, uint64_t uImm,
3135 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
3145 3136 {
3146 3137 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
3147 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
3148 3138 return iemNativeEmitTestIfGprNotEqualImmAndJmpToLabel(pReNative, off, iGprSrc, uImm, idxLabel);
3149 3139 }
… …
3154 3144 * @a uImm.
3155 3145 */
3156 DECLINLINE(uint32_t) iemNativeEmitTestIfGpr32NotEqualImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3157 uint8_t iGprSrc, uint32_t uImm, uint32_t idxLabel)
3146 DECL_INLINE_THROW(uint32_t) iemNativeEmitTestIfGpr32NotEqualImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3147 uint8_t iGprSrc, uint32_t uImm, uint32_t idxLabel)
3158 3148 {
3159 3149 off = iemNativeEmitCmpGpr32WithImm(pReNative, off, iGprSrc, uImm);
… …
3167 3157 * @a uImm.
3168 3158 */
3169 DECLINLINE(uint32_t) iemNativeEmitTestIfGpr32NotEqualImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3170 uint8_t iGprSrc, uint32_t uImm,
3171 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
3159 DECL_INLINE_THROW(uint32_t)
3160 iemNativeEmitTestIfGpr32NotEqualImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3161 uint8_t iGprSrc, uint32_t uImm,
3162 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
3172 3163 {
3173 3164 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
3174 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
3175 3165 return iemNativeEmitTestIfGpr32NotEqualImmAndJmpToLabel(pReNative, off, iGprSrc, uImm, idxLabel);
3176 3166 }
… …
3181 3171 * Emits a call to a 64-bit address.
3182 3172 */
3183 DECLINLINE(uint32_t) iemNativeEmitCallImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uintptr_t uPfn)
3173 DECL_INLINE_THROW(uint32_t) iemNativeEmitCallImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uintptr_t uPfn)
3184 3174 {
3185 3175 #ifdef RT_ARCH_AMD64
… …
3188 3178 /* call rax */
3189 3179 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
3190 AssertReturn(pbCodeBuf, UINT32_MAX);
3191 3180 pbCodeBuf[off++] = 0xff;
3192 3181 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
… …
3196 3185
3197 3186 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
3198 AssertReturn(pu32CodeBuf, UINT32_MAX);
3199 3187 pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
3188
3200 3189 #else
3201 3190 # error "port me"
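Both arms of iemNativeEmitCallImm first materialise uPfn in a scratch register (rax on AMD64, per the elided lines above "call rax"; the fixed TMP0 on ARM64) and then branch indirectly. A hedged usage sketch; the helper name is made up for the example, and argument marshalling is left to other emitters:

    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpExampleWorker /*illustrative*/);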