Timestamp: Jun 26, 2016 10:12:23 PM
Location:  trunk/src/VBox/VMM
Files:     4 edited
trunk/src/VBox/VMM/Makefile.kmk
r61671 → r61885

 #endif

+if "$(USERNAME)" == "bird" && "$(KBUILD_TARGET)" == "win"
+ VBoxVMM_VMMAll/IEMAll.cpp_CXXFLAGS = /FAcs /Fa$(subst /,\\,$(outbase).cod)
+endif
+
 $(call VBOX_SET_VER_INFO_DLL,VBoxVMM,VirtualBox VMM) # Version info / description.
…

 $(call VBOX_SET_VER_INFO_RC,VMMRC,VirtualBox VMM - raw-mode context parts) # Version info / description.
+
+if "$(USERNAME)" == "bird" && "$(KBUILD_TARGET)" == "win"
+ VMMRC_VMMAll/IEMAll.cpp_CXXFLAGS = /FAcs /Fa$(subst /,\\,$(outbase).cod)
+endif
 endif # VBOX_WITH_RAW_MODE && !VBOX_ONLY_EXTPACKS
…

 $(call VBOX_SET_VER_INFO_R0,VMMR0,VirtualBox VMM - ring-0 context parts) # Version info / description.
+
+if "$(USERNAME)" == "bird" && "$(KBUILD_TARGET)" == "win"
+ VMMR0_VMMAll/IEMAll.cpp_CXXFLAGS = /FAcs /Fa$(subst /,\\,$(outbase).cod)
+endif
 endif # !VBOX_ONLY_EXTPACKS
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r61657 → r61885

 //#define IEM_LOG_MEMORY_WRITES
 #define IEM_IMPLEMENTS_TASKSWITCH
-
…
-
-
 /*********************************************************************************************************************************
 *   Structures and Typedefs                                                                                                      *
…
+/** @typedef PFNIEMOPRM
+ * Pointer to an opcode decoder function with RM byte.
+ */
+
+/** @def FNIEMOPRM_DEF
+ * Define an opcode decoder function with RM byte.
+ *
+ * We're using macros for this so that adding and removing parameters as well
+ * as tweaking compiler specific attributes becomes easier.  See FNIEMOP_CALL_1.
+ *
+ * @param   a_Name      The function name.
+ */

 #if defined(__GNUC__) && defined(RT_ARCH_X86)
 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
+typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PIEMCPU pIemCpu, uint8_t bRm);
 # define FNIEMOP_DEF(a_Name) \
     IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
…
 #elif defined(_MSC_VER) && defined(RT_ARCH_X86)
 typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
+typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PIEMCPU pIemCpu, uint8_t bRm);
 # define FNIEMOP_DEF(a_Name) \
     IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
…
 #elif defined(__GNUC__)
 typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
+typedef VBOXSTRICTRC (* PFNIEMOPRM)(PIEMCPU pIemCpu, uint8_t bRm);
 # define FNIEMOP_DEF(a_Name) \
     IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
…
 #else
 typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);
+typedef VBOXSTRICTRC (* PFNIEMOPRM)(PIEMCPU pIemCpu, uint8_t bRm);
 # define FNIEMOP_DEF(a_Name) \
     IEM_STATIC VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW_DEF
…
 #endif
+#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
…
 *   Defined Constants And Macros                                                                                                 *
 *********************************************************************************************************************************/
+/** @def IEM_WITH_SETJMP
+ * Enables alternative status code handling using setjmps.
+ *
+ * This adds a bit of expense via the setjmp() call since it saves all the
+ * non-volatile registers.  However, it eliminates return code checks and allows
+ * for more optimal return value passing (return regs instead of stack buffer).
+ */
+#if defined(DOXYGEN_RUNNING)
+# define IEM_WITH_SETJMP
+#endif
+
 /** Temporary hack to disable the double execution.  Will be removed in favor
  * of a dedicated execution mode in EM. */
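The IEM_WITH_SETJMP description above is the heart of this changeset. A minimal standalone sketch of the two error models it contrasts (all names below are invented for illustration; nothing here is VirtualBox code):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf g_JmpBuf;                          /* stand-in for pIemCpu->pJmpBuf */
static const unsigned char g_abOps[] = { 0x90, 0xCC };
static unsigned g_off = 0;

/* Without setjmp this would be "int fetch(unsigned char *pb)" and every call
 * site would have to check and propagate the status code.  In the setjmp
 * model the value comes back in a register and failure jumps straight back
 * to the instruction boundary. */
static unsigned char fetchJmp(void)
{
    if (g_off >= sizeof(g_abOps))
        longjmp(g_JmpBuf, -1);                    /* think VERR_/VINF_ status code */
    return g_abOps[g_off++];
}

static int decodeOneInstr(void)
{
    int rc = setjmp(g_JmpBuf);                    /* one save point per instruction */
    if (rc != 0)
        return rc;                                /* some fetch below failed */
    unsigned char b = fetchJmp();                 /* no rc plumbing on this path */
    printf("opcode byte %#x\n", b);
    return 0;
}

int main(void)
{
    while (decodeOneInstr() == 0)
        /* decode the next instruction */;
    return 0;
}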
…
+#ifndef IEM_WITH_SETJMP

 /**
  * Fetches the next opcode byte.
…
 }

+#else  /* IEM_WITH_SETJMP */
+
+/**
+ * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
+ *
+ * @returns The opcode byte.
+ * @param   pIemCpu             The IEM state.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PIEMCPU pIemCpu)
+{
+    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
+    if (rcStrict == VINF_SUCCESS)
+        return pIemCpu->abOpcode[pIemCpu->offOpcode++];
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+
+
+/**
+ * Fetches the next opcode byte, longjmp on error.
+ *
+ * @returns The opcode byte.
+ * @param   pIemCpu             The IEM state.
+ */
+DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PIEMCPU pIemCpu)
+{
+    unsigned offOpcode = pIemCpu->offOpcode;
+    if (RT_LIKELY((uint8_t)offOpcode < pIemCpu->cbOpcode))
+    {
+        pIemCpu->offOpcode = (uint8_t)offOpcode + 1;
+        return pIemCpu->abOpcode[offOpcode];
+    }
+    return iemOpcodeGetNextU8SlowJmp(pIemCpu);
+}
+
+#endif /* IEM_WITH_SETJMP */

 /**
  * Fetches the next opcode byte, returns automatically on failure.
  *
  * @param   a_pu8               Where to return the opcode byte.
  * @remark  Implicitly references pIemCpu.
  */
-#define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
     do \
     { \
         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
-        if (rcStrict2 != VINF_SUCCESS) \
+        if (rcStrict2 == VINF_SUCCESS) \
+        { /* likely */ } \
+        else \
             return rcStrict2; \
     } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pIemCpu))
+#endif /* IEM_WITH_SETJMP */

The signed-byte forms get the same treatment: iemOpcodeGetNextS8, iemOpcodeGetNextS8SxU16 and iemOpcodeGetNextS8SxU32 are fenced in with #ifndef IEM_WITH_SETJMP / #endif, and their macros each gain a setjmp expression form:

+# define IEM_OPCODE_GET_NEXT_S8(a_pi8)              (*(a_pi8)  = (int8_t)iemOpcodeGetNextU8Jmp(pIemCpu))
+# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16)      (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pIemCpu))
+# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32)      (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pIemCpu))
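iemOpcodeGetNextU8Jmp above is a textbook fast-path/slow-path split: the inlined body handles only the already-buffered case, and everything rare lives in a separate non-inlined Slow function so the hot code stays small. A generic, compile-clean sketch of the pattern (invented names, GCC builtins assumed; not a VBox API):

#include <stdint.h>

#define MY_LIKELY(expr) __builtin_expect(!!(expr), 1)   /* the idea behind RT_LIKELY */

typedef struct DemoState
{
    uint8_t abBuf[16];
    uint8_t off;
    uint8_t cb;
} DemoState;

/* Cold path: kept out of line so the hot caller stays tiny. */
__attribute__((noinline)) static uint8_t demoGetByteSlow(DemoState *pState)
{
    /* refill pState->abBuf here; the real code longjmps on hard failure */
    pState->cb  = sizeof(pState->abBuf);
    pState->off = 1;
    return pState->abBuf[0];
}

/* Hot path: one compare and an array load when the byte is buffered. */
static inline uint8_t demoGetByte(DemoState *pState)
{
    uint8_t const off = pState->off;
    if (MY_LIKELY(off < pState->cb))
    {
        pState->off = off + 1;
        return pState->abBuf[off];
    }
    return demoGetByteSlow(pState);
}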
The 64-bit sign-extending byte form follows suit:

+# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64)      (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pIemCpu))

…
+#ifndef IEM_WITH_SETJMP

 /**
  * Fetches the next opcode word.
…
 }

+#else  /* IEM_WITH_SETJMP */
+
+/**
+ * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
+ *
+ * @returns The opcode word.
+ * @param   pIemCpu             The IEM state.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PIEMCPU pIemCpu)
+{
+    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        uint8_t offOpcode = pIemCpu->offOpcode;
+        pIemCpu->offOpcode += 2;
+        return RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
+    }
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+
+
+/**
+ * Fetches the next opcode word, longjmp on error.
+ *
+ * @returns The opcode word.
+ * @param   pIemCpu             The IEM state.
+ */
+DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PIEMCPU pIemCpu)
+{
+    uint8_t const offOpcode = pIemCpu->offOpcode;
+    if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
+        return iemOpcodeGetNextU16SlowJmp(pIemCpu);
+
+    pIemCpu->offOpcode = offOpcode + 2;
+    return RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
+}
+
+#endif /* IEM_WITH_SETJMP */

IEM_OPCODE_GET_NEXT_U16 is split like the U8 macro (status-code form kept, setjmp form a plain expression), and the widening word fetches gain these setjmp forms as committed:

+# define IEM_OPCODE_GET_NEXT_U16(a_pu16)            (*(a_pu16) = iemOpcodeGetNextU16Jmp(pIemCpu))
+# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32)     (*(a_pu32) = (int16_t)iemOpcodeGetNextU16Jmp(pIemCpu))
+# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64)     (*(a_pu64) = (int16_t)iemOpcodeGetNextU16Jmp(pIemCpu))
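The _SX_ forms above widen by casting the fetched value through the narrower signed type before assigning to the wider destination; that cast is what performs the sign extension. Standalone illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint8_t  b   = 0xFE;                    /* -2 when viewed as int8_t */
    uint16_t u16 = (uint16_t)(int8_t)b;     /* sign-extends to 0xFFFE */
    uint64_t u64 = (uint64_t)(int8_t)b;     /* sign-extends to 0xFFFF...FFFE */
    assert(u16 == 0xFFFE);
    assert(u64 == UINT64_C(0xFFFFFFFFFFFFFFFE));
    return 0;
}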
iemOpcodeGetNextS16 is likewise fenced with #ifndef IEM_WITH_SETJMP, and its macro gains:

+# define IEM_OPCODE_GET_NEXT_S16(a_pi16)            (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pIemCpu))

…
+#else  /* !IEM_WITH_SETJMP */
+
+/**
+ * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
+ *
+ * @returns The opcode dword.
+ * @param   pIemCpu             The IEM state.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PIEMCPU pIemCpu)
+{
+    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        uint8_t offOpcode = pIemCpu->offOpcode;
+        pIemCpu->offOpcode = offOpcode + 4;
+        return RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
+                                   pIemCpu->abOpcode[offOpcode + 1],
+                                   pIemCpu->abOpcode[offOpcode + 2],
+                                   pIemCpu->abOpcode[offOpcode + 3]);
+    }
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+
+
+/**
+ * Fetches the next opcode dword, longjmp on error.
+ *
+ * @returns The opcode double word.
+ * @param   pIemCpu             The IEM state.
+ */
+DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PIEMCPU pIemCpu)
+{
+    uint8_t const offOpcode = pIemCpu->offOpcode;
+    if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
+        return iemOpcodeGetNextU32SlowJmp(pIemCpu);
+
+    pIemCpu->offOpcode = offOpcode + 4;
+    return RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
+                               pIemCpu->abOpcode[offOpcode + 1],
+                               pIemCpu->abOpcode[offOpcode + 2],
+                               pIemCpu->abOpcode[offOpcode + 3]);
+}
+
+#endif /* !IEM_WITH_SETJMP */

The dword macros gain:

+# define IEM_OPCODE_GET_NEXT_U32(a_pu32)            (*(a_pu32) = iemOpcodeGetNextU32Jmp(pIemCpu))
+# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64)     (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pIemCpu))
So do the signed dword forms:

+# define IEM_OPCODE_GET_NEXT_S32(a_pi32)            (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pIemCpu))
+# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64)     (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pIemCpu))

…
The qword getter pair iemOpcodeGetNextU64SlowJmp / iemOpcodeGetNextU64Jmp is added with the same shape as the U16/U32 pairs, assembling eight opcode bytes with RT_MAKE_U64_FROM_U8, and the qword macro becomes:

+#ifndef IEM_WITH_SETJMP
+# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
     do \
     { \
         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
         if (rcStrict2 != VINF_SUCCESS) \
             return rcStrict2; \
     } while (0)
+#else
+# define IEM_OPCODE_GET_NEXT_U64(a_pu64)    ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pIemCpu) )
+#endif
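The word/dword/qword getters assemble their results from individual opcode bytes with RT_MAKE_U16 / RT_MAKE_U32_FROM_U8 / RT_MAKE_U64_FROM_U8, i.e. in x86 little-endian order. Plain-C equivalent of the 32-bit case:

#include <assert.h>
#include <stdint.h>

static uint32_t make_u32_from_u8(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3)
{
    return (uint32_t)b0
         | ((uint32_t)b1 << 8)
         | ((uint32_t)b2 << 16)
         | ((uint32_t)b3 << 24);
}

int main(void)
{
    /* the opcode byte sequence 78 56 34 12 decodes as the immediate 0x12345678 */
    assert(make_u32_from_u8(0x78, 0x56, 0x34, 0x12) == UINT32_C(0x12345678));
    return 0;
}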
…
+#ifdef IEM_WITH_SETJMP
+
+/**
+ * Maps the specified guest memory for the given kind of access, longjmp on
+ * error.
+ *
+ * This may be using bounce buffering of the memory if it's crossing a page
+ * boundary or if there is an access handler installed for any of it.  Because
+ * of lock prefix guarantees, we're in for some extra clutter when this
+ * happens.
+ *
+ * This may raise a \#GP, \#SS, \#PF or \#AC.
+ *
+ * @returns Pointer to the mapped memory.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   cbMem               The number of bytes to map.  This is usually 1,
+ *                              2, 4, 6, 8, 12, 16, 32 or 512.  When used by
+ *                              string operations it can be up to a page.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ *                              Use UINT8_MAX to indicate that no segmentation
+ *                              is required (for IDT, GDT and LDT accesses).
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   fAccess             How the memory is being accessed.  The
+ *                              IEM_ACCESS_TYPE_XXX bit is used to figure out
+ *                              how to map the memory, while the
+ *                              IEM_ACCESS_WHAT_XXX bit is used when raising
+ *                              exceptions.
+ */
+IEM_STATIC void *iemMemMapJmp(PIEMCPU pIemCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
+{
+    /*
+     * Check the input and figure out which mapping entry to use.
+     */
+    Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
+    Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
+    Assert(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));
+
+    unsigned iMemMap = pIemCpu->iNextMapping;
+    if (   iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings)
+        || pIemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
+    {
+        iMemMap = iemMemMapFindFree(pIemCpu);
+        AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings),
+                            ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pIemCpu->cActiveMappings,
+                             pIemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,
+                             pIemCpu->aMemMappings[2].fAccess),
+                            longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
+    }
+
+    /*
+     * Map the memory, checking that we can actually access it.  If something
+     * slightly complicated happens, fall back on bounce buffering.
+     */
+    VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
+    if (rcStrict == VINF_SUCCESS) { /*likely*/ }
+    else longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+    /* Crossing a page boundary? */
+    if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
+    { /* No (likely). */ }
+    else
+    {
+        void *pvMem;
+        VBOXSTRICTRC rcStrict = iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
+        if (rcStrict == VINF_SUCCESS)
+            return pvMem;
+        longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+    }
+
+    RTGCPHYS GCPhysFirst;
+    rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);
+    if (rcStrict == VINF_SUCCESS) { /*likely*/ }
+    else longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+
+    if (fAccess & IEM_ACCESS_TYPE_WRITE)
+        Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
+    if (fAccess & IEM_ACCESS_TYPE_READ)
+        Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
+
+    void *pvMem;
+    rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
+    if (rcStrict == VINF_SUCCESS)
+    { /* likely */ }
+    else
+    {
+        void *pvMem;
+        rcStrict = iemMemBounceBufferMapPhys(pIemCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
+        if (rcStrict == VINF_SUCCESS)
+            return pvMem;
+        longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+    }
+
+    /*
+     * Fill in the mapping table entry.
+     */
+    pIemCpu->aMemMappings[iMemMap].pv      = pvMem;
+    pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;
+    pIemCpu->iNextMapping = iMemMap + 1;
+    pIemCpu->cActiveMappings++;
+
+    iemMemUpdateWrittenCounter(pIemCpu, fAccess, cbMem);
+    return pvMem;
+}
+
+
+/**
+ * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pvMem               The mapping.
+ * @param   fAccess             The kind of access.
+ */
+IEM_STATIC void iemMemCommitAndUnmapJmp(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)
+{
+    int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);
+    AssertStmt(iMemMap >= 0, longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), iMemMap));
+
+    /* If it's bounce buffered, we may need to write back the buffer. */
+    if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
+    {
+        if (pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
+        {
+            VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap, false /*fPostponeFail*/);
+            if (rcStrict == VINF_SUCCESS)
+                return;
+            longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+        }
+    }
+    /* Otherwise unlock it. */
+    else
+        PGMPhysReleasePageMappingLock(IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);
+
+    /* Free the entry. */
+    pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
+    Assert(pIemCpu->cActiveMappings != 0);
+    pIemCpu->cActiveMappings--;
+}
+
+#endif
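iemMemMapJmp and iemMemCommitAndUnmapJmp above are always used as a bracket around a direct pointer access; the typed fetch and store helpers that follow are all instances of that discipline. A generic sketch of the map/access/commit shape (toy memory, invented names):

#include <stdint.h>
#include <string.h>

static uint8_t g_abGuestPage[4096];   /* toy "guest memory": one page */

/* Map: here just a pointer into the page; the real code may hand back a
 * bounce buffer instead when the range crosses a page or has handlers. */
static void *demoMemMap(uint64_t off, size_t cb)
{
    (void)cb;
    return &g_abGuestPage[off];
}

/* Commit+unmap: where a bounce buffer would be written back and the page
 * mapping lock released; a no-op in this sketch. */
static void demoMemCommitAndUnmap(void *pv)
{
    (void)pv;
}

static uint32_t demoFetchU32(uint64_t off)
{
    uint32_t const *pu32Src = (uint32_t const *)demoMemMap(off, sizeof(*pu32Src));
    uint32_t const  uRet    = *pu32Src;          /* the actual access */
    demoMemCommitAndUnmap((void *)pu32Src);
    return uRet;
}

int main(void)
{
    uint32_t u32 = 42;
    memcpy(&g_abGuestPage[8], &u32, sizeof(u32));
    return demoFetchU32(8) == 42 ? 0 : 1;
}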
…
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data byte, longjmp on error.
+ *
+ * @returns The byte.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    uint8_t const bRet = *pu8Src;
+    iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
+    return bRet;
+}
+#endif /* IEM_WITH_SETJMP */

Matching iemMemFetchDataU16Jmp, iemMemFetchDataU32Jmp and iemMemFetchDataU64Jmp readers are inserted after the corresponding status-code fetchers, each following the identical map/read/commit pattern with uint16_t, uint32_t and uint64_t respectively.
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on
+ * error.
+ *
+ * @returns The qword.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
+    if (RT_LIKELY(!(GCPtrMem & 15)))
+    {
+        uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+        uint64_t const u64Ret = *pu64Src;
+        iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
+        return u64Ret;
+    }
+
+    VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pIemCpu);
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
+}
+#endif
…
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data tword, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pr80Dst             Where to return the tword.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pIemCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    *pr80Dst = *pr80Src;
+    iemMemCommitAndUnmapJmp(pIemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
+}
+#endif
…
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data dqword (double qword), generally SSE related, longjmp on
+ * error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pu128Dst            Where to return the dqword.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+IEM_STATIC void iemMemFetchDataU128Jmp(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    *pu128Dst = *pu128Src;
+    iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
+}
+#endif
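The aligned fetcher above (and the SSE-aligned pair that follows) gates on (GCPtrMem & 15) before mapping and raises #GP(0) via longjmp otherwise. The check itself is the usual power-of-two alignment test; tiny standalone version:

#include <assert.h>
#include <stdint.h>

/* 16 is a power of two, so "addr & 15" isolates the misaligned low bits. */
static int isAligned16(uint64_t uAddr)
{
    return (uAddr & 15) == 0;
}

int main(void)
{
    assert( isAligned16(0x1000));
    assert(!isAligned16(0x1001));
    return 0;
}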
+#ifdef IEM_WITH_SETJMP
+/**
+ * Fetches a data dqword (double qword) at an aligned address, generally SSE
+ * related, longjmp on error.
+ *
+ * Raises \#GP(0) if not aligned.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pu128Dst            Where to return the dqword.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
+    if (   (GCPtrMem & 15) == 0
+        || (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
+    {
+        uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pIemCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
+                                                                    IEM_ACCESS_DATA_R);
+        *pu128Dst = *pu128Src;
+        iemMemCommitAndUnmapJmp(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
+        return;
+    }
+
+    VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pIemCpu);
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+#endif
…
On the store side the same scheme starts with the byte:

+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data byte, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   u8Value             The value to store.
+ */
+IEM_STATIC void iemMemStoreDataU8Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
+{
+    /* The lazy approach for now... */
+    uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pIemCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    *pu8Dst = u8Value;
+    iemMemCommitAndUnmapJmp(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);
+}
+#endif

iemMemStoreDataU16Jmp and iemMemStoreDataU32Jmp are added the same way (map for IEM_ACCESS_DATA_W, assign, commit).
…
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data qword, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   u64Value            The value to store.
+ */
+IEM_STATIC void iemMemStoreDataU64Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
+{
+    /* The lazy approach for now... */
+    uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pIemCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    *pu64Dst = u64Value;
+    iemMemCommitAndUnmapJmp(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);
+}
+#endif
…
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data dqword, longjmp on error.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   u128Value           The value to store.
+ */
+IEM_STATIC void iemMemStoreDataU128Jmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
+{
+    /* The lazy approach for now... */
+    uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pIemCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    *pu128Dst = u128Value;
+    iemMemCommitAndUnmapJmp(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
+}
+#endif
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data dqword, SSE aligned, longjmp on error.
+ *
+ * Raises \#GP(0) if not aligned.
+ *
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ * @param   u128Value           The value to store.
+ */
+DECL_NO_INLINE(IEM_STATIC, void)
+iemMemStoreDataU128AlignedSseJmp(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
+{
+    /* The lazy approach for now... */
+    if (   (GCPtrMem & 15) == 0
+        || (pIemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
+    {
+        uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pIemCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+        *pu128Dst = u128Value;
+        iemMemCommitAndUnmapJmp(pIemCpu, pu128Dst, IEM_ACCESS_DATA_W);
+        return;
+    }
+
+    VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pIemCpu);
+    longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+#endif
…
 #define IEM_MC_ADVANCE_RIP()                            iemRegUpdateRipAndClearRF(pIemCpu)
…
From here through the rest of the IEM_MC_* accessor section the pattern is uniform: each existing IEM_MC_RETURN_ON_FAILURE based definition is kept under #ifndef IEM_WITH_SETJMP (re-indented to "# define"), and an expression form calling the new *Jmp helpers is added under #else.  For the byte and word fetches:

+# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u8Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
+    ((a_u8Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem16)))
+# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
+    ((a_u8Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem32)))
+# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u16Dst) = iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+    ((a_u16Dst) = iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
+    ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
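The reason both definitions can share one name is that the setjmp getters report failure by longjmp, so the accessor can shrink from a statement that checks a status code to a plain expression, and the instruction templates that use it compile unchanged under either model. Condensed toy version of the dual-definition trick (invented names, self-contained):

#include <stdint.h>

static int     fetch_u8(uint8_t *pb)  { *pb = 0x90; return 0; }
static uint8_t fetch_u8_jmp(void)     { return 0x90; }   /* would longjmp on error */

#ifndef WITH_SETJMP   /* build-time selector, as IEM_WITH_SETJMP is above */
# define FETCH_U8(a_u8Dst) \
    do { \
        int rc2 = fetch_u8(&(a_u8Dst)); \
        if (rc2 != 0) \
            return rc2; \
    } while (0)
#else
# define FETCH_U8(a_u8Dst) ((a_u8Dst) = fetch_u8_jmp())
#endif

/* The decoder template is written once and compiles under either model: */
static int decode_example(void)
{
    uint8_t b;
    FETCH_U8(b);
    return b == 0x90 ? 0 : 1;
}

int main(void)
{
    return decode_example();
}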
The dword and qword fetches follow; the unused sign-extending fetch is additionally fenced with SOME_UNUSED_FUNCTION, and IEM_MC_FETCH_MEM_U64_ALIGN_U128 has its parameter renamed a_u128Dst → a_u64Dst to match the 64-bit destination it actually takes:

+# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u32Dst) = iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+    ((a_u32Dst) = iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))

+#ifdef SOME_UNUSED_FUNCTION
+# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
+#endif

-# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
-    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))

+# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = iemMemFetchDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+    ((a_u64Dst) = iemMemFetchDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
+# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
The float and dqword fetches gain:

+# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
+    iemMemFetchDataR80Jmp(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
+    iemMemFetchDataU128Jmp(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
+# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
+    iemMemFetchDataU128AlignedSseJmp(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
The zero-extending fetches keep their temp-variable form for the status-code build and collapse to one-liners under IEM_WITH_SETJMP:

+# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u16Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u32Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u32Dst) = iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))

So do the sign-extending fetches, casting through the narrower signed type:

+# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
+# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
+    ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem)))
The plain and constant stores become direct calls to the void store helpers:

+# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
+    iemMemStoreDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
+# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
+    iemMemStoreDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
+# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
+    iemMemStoreDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
+# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
+    iemMemStoreDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
+# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
+    iemMemStoreDataU8Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
+# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
+    iemMemStoreDataU16Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
+# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
+    iemMemStoreDataU32Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
+# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
+    iemMemStoreDataU64Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))

(The IEM_MC_STORE_MEM_*_CONST_BY_REF macros in between are untouched.)
+# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
+    iemMemStoreDataU128Jmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
+# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
+    iemMemStoreDataU128AlignedSseJmp(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
…
 /** Calculate efficient address from R/M. */
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
     IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
+#else
+# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
+    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pIemCpu, (bRm), (cbImm)))
+#endif
…
Finally, a setjmp sibling of iemOpHlpCalcRmEffAddr is added after it (the original function only sees a re-indent of its SET_SS_DEF helper):

+#ifdef IEM_WITH_SETJMP
+/**
+ * Calculates the effective address of a ModR/M memory operand.
+ *
+ * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
+ *
+ * May longjmp on internal error.
+ *
+ * @return  The effective address.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   bRm                 The ModRM byte.
+ * @param   cbImm               The size of any immediate following the
+ *                              effective address opcode bytes.  Important for
+ *                              RIP relative addressing.
+ */
+IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm)
+{
+    Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
+    PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+# define SET_SS_DEF() \
+    do \
+    { \
+        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
+            pIemCpu->iEffSeg = X86_SREG_SS; \
+    } while (0)
+
+    if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
+    {
+        /** @todo Check the effective address size crap! */
+        if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
+        {
+            uint16_t u16EffAddr;
+
+            /* Handle the disp16 form with no registers first. */
+            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
+                IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
+            else
+            {
+                /* Get the displacement. */
+                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
+                {
+                    case 0:  u16EffAddr = 0;                             break;
+                    case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
+                    case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
+                    default: AssertFailedStmt(longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
+                }
+
+                /* Add the base and index registers to the disp. */
+                switch (bRm & X86_MODRM_RM_MASK)
+                {
+                    case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
+                    case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
+                    case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
+                    case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
+                    case 4: u16EffAddr += pCtx->si;            break;
+                    case 5: u16EffAddr += pCtx->di;            break;
+                    case 6: u16EffAddr += pCtx->bp;            SET_SS_DEF(); break;
+                    case 7: u16EffAddr += pCtx->bx;            break;
+                }
+            }
+
+            Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
+            return u16EffAddr;
+        }
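The 16-bit branch just transcribed is the classic ModR/M base/index table. For reference, the same table as a tiny standalone function (the register struct and names are invented for this sketch; displacement handling omitted):

#include <stdint.h>

typedef struct Regs16 { uint16_t bx, si, di, bp; } Regs16;

/* rm = low three bits of the ModR/M byte.  SS is the default segment for
 * the bp-based rows (cases 2, 3 and 6), which is what SET_SS_DEF() marks
 * in the code above. */
static uint16_t demoEa16(const Regs16 *pRegs, uint8_t rm)
{
    switch (rm & 7)
    {
        case 0:  return (uint16_t)(pRegs->bx + pRegs->si);
        case 1:  return (uint16_t)(pRegs->bx + pRegs->di);
        case 2:  return (uint16_t)(pRegs->bp + pRegs->si);
        case 3:  return (uint16_t)(pRegs->bp + pRegs->di);
        case 4:  return pRegs->si;
        case 5:  return pRegs->di;
        case 6:  return pRegs->bp;
        default: return pRegs->bx;
    }
}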
#ifdef IEM_WITH_SETJMP
/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
 *
 * May longjmp on internal error.
 *
 * @return  The effective address.
 * @param   pIemCpu             The IEM per CPU data.
 * @param   bRm                 The ModRM byte.
 * @param   cbImm               The size of any immediate following the
 *                              effective address opcode bytes.  Important for
 *                              RIP relative addressing.
 */
IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm)
{
    Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
    PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
# define SET_SS_DEF() \
    do \
    { \
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
            pIemCpu->iEffSeg = X86_SREG_SS; \
    } while (0)

    if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
    {
        /** @todo Check the effective address size crap! */
        if (pIemCpu->enmEffAddrMode == IEMMODE_16BIT)
        {
            uint16_t u16EffAddr;

            /* Handle the disp16 form with no registers first. */
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
                IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
            else
            {
                /* Get the displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:  u16EffAddr = 0;                             break;
                    case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
                    case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
                    default: AssertFailedStmt(longjmp(*pIemCpu->CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
                }

                /* Add the base and index registers to the disp. */
                switch (bRm & X86_MODRM_RM_MASK)
                {
                    case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
                    case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
                    case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
                    case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
                    case 4: u16EffAddr += pCtx->si;            break;
                    case 5: u16EffAddr += pCtx->di;            break;
                    case 6: u16EffAddr += pCtx->bp;            SET_SS_DEF(); break;
                    case 7: u16EffAddr += pCtx->bx;            break;
                }
            }

            Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
            return u16EffAddr;
        }

        Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
        uint32_t u32EffAddr;

        /* Handle the disp32 form with no registers first. */
        if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
            IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
        else
        {
            /* Get the register (or SIB) value. */
            switch ((bRm & X86_MODRM_RM_MASK))
            {
                case 0: u32EffAddr = pCtx->eax; break;
                case 1: u32EffAddr = pCtx->ecx; break;
                case 2: u32EffAddr = pCtx->edx; break;
                case 3: u32EffAddr = pCtx->ebx; break;
                case 4: /* SIB */
                {
                    uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);

                    /* Get the index and scale it. */
                    switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
                    {
                        case 0: u32EffAddr = pCtx->eax; break;
                        case 1: u32EffAddr = pCtx->ecx; break;
                        case 2: u32EffAddr = pCtx->edx; break;
                        case 3: u32EffAddr = pCtx->ebx; break;
                        case 4: u32EffAddr = 0; /* none */ break;
                        case 5: u32EffAddr = pCtx->ebp; break;
                        case 6: u32EffAddr = pCtx->esi; break;
                        case 7: u32EffAddr = pCtx->edi; break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET();
                    }
                    u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                    /* add base */
                    switch (bSib & X86_SIB_BASE_MASK)
                    {
                        case 0: u32EffAddr += pCtx->eax; break;
                        case 1: u32EffAddr += pCtx->ecx; break;
                        case 2: u32EffAddr += pCtx->edx; break;
                        case 3: u32EffAddr += pCtx->ebx; break;
                        case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
                        case 5:
                            if ((bRm & X86_MODRM_MOD_MASK) != 0)
                            {
                                u32EffAddr += pCtx->ebp;
                                SET_SS_DEF();
                            }
                            else
                            {
                                uint32_t u32Disp;
                                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                                u32EffAddr += u32Disp;
                            }
                            break;
                        case 6: u32EffAddr += pCtx->esi; break;
                        case 7: u32EffAddr += pCtx->edi; break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET();
                    }
                    break;
                }
                case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
                case 6: u32EffAddr = pCtx->esi; break;
                case 7: u32EffAddr = pCtx->edi; break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }

            /* Get and add the displacement. */
            switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
            {
                case 0:
                    break;
                case 1:
                {
                    int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                    u32EffAddr += i8Disp;
                    break;
                }
                case 2:
                {
                    uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                    u32EffAddr += u32Disp;
                    break;
                }
                default:
                    AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
            }
        }

        if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
        {
            Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
            return u32EffAddr;
        }
        Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
        Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
        return u32EffAddr & UINT16_MAX;
    }

    uint64_t u64EffAddr;

    /* Handle the rip+disp32 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
        u64EffAddr += pCtx->rip + pIemCpu->offOpcode + cbImm;
    }
    else
    {
        /* Get the register (or SIB) value. */
        switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
        {
            case  0: u64EffAddr = pCtx->rax; break;
            case  1: u64EffAddr = pCtx->rcx; break;
            case  2: u64EffAddr = pCtx->rdx; break;
            case  3: u64EffAddr = pCtx->rbx; break;
            case  5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
            case  6: u64EffAddr = pCtx->rsi; break;
            case  7: u64EffAddr = pCtx->rdi; break;
            case  8: u64EffAddr = pCtx->r8;  break;
            case  9: u64EffAddr = pCtx->r9;  break;
            case 10: u64EffAddr = pCtx->r10; break;
            case 11: u64EffAddr = pCtx->r11; break;
            case 13: u64EffAddr = pCtx->r13; break;
            case 14: u64EffAddr = pCtx->r14; break;
            case 15: u64EffAddr = pCtx->r15; break;
            /* SIB */
            case 4:
            case 12:
            {
                uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);

                /* Get the index and scale it. */
                switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
                {
                    case  0: u64EffAddr = pCtx->rax; break;
                    case  1: u64EffAddr = pCtx->rcx; break;
                    case  2: u64EffAddr = pCtx->rdx; break;
                    case  3: u64EffAddr = pCtx->rbx; break;
                    case  4: u64EffAddr = 0; /* none */ break;
                    case  5: u64EffAddr = pCtx->rbp; break;
                    case  6: u64EffAddr = pCtx->rsi; break;
                    case  7: u64EffAddr = pCtx->rdi; break;
                    case  8: u64EffAddr = pCtx->r8;  break;
                    case  9: u64EffAddr = pCtx->r9;  break;
                    case 10: u64EffAddr = pCtx->r10; break;
                    case 11: u64EffAddr = pCtx->r11; break;
                    case 12: u64EffAddr = pCtx->r12; break;
                    case 13: u64EffAddr = pCtx->r13; break;
                    case 14: u64EffAddr = pCtx->r14; break;
                    case 15: u64EffAddr = pCtx->r15; break;
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                /* add base */
                switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
                {
                    case  0: u64EffAddr += pCtx->rax; break;
                    case  1: u64EffAddr += pCtx->rcx; break;
                    case  2: u64EffAddr += pCtx->rdx; break;
                    case  3: u64EffAddr += pCtx->rbx; break;
                    case  4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
                    case  6: u64EffAddr += pCtx->rsi; break;
                    case  7: u64EffAddr += pCtx->rdi; break;
                    case  8: u64EffAddr += pCtx->r8;  break;
                    case  9: u64EffAddr += pCtx->r9;  break;
                    case 10: u64EffAddr += pCtx->r10; break;
                    case 11: u64EffAddr += pCtx->r11; break;
                    case 12: u64EffAddr += pCtx->r12; break;
                    case 14: u64EffAddr += pCtx->r14; break;
                    case 15: u64EffAddr += pCtx->r15; break;
                    /* complicated encodings */
                    case 5:
                    case 13:
                        if ((bRm & X86_MODRM_MOD_MASK) != 0)
                        {
                            if (!pIemCpu->uRexB)
                            {
                                u64EffAddr += pCtx->rbp;
                                SET_SS_DEF();
                            }
                            else
                                u64EffAddr += pCtx->r13;
                        }
                        else
                        {
                            uint32_t u32Disp;
                            IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                            u64EffAddr += (int32_t)u32Disp;
                        }
                        break;
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }

        /* Get and add the displacement. */
        switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
        {
            case 0:
                break;
            case 1:
            {
                int8_t i8Disp;
                IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                u64EffAddr += i8Disp;
                break;
            }
            case 2:
            {
                uint32_t u32Disp;
                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                u64EffAddr += (int32_t)u32Disp;
                break;
            }
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
        }
    }

    if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
    {
        Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
        return u64EffAddr;
    }
    Assert(pIemCpu->enmEffAddrMode == IEMMODE_32BIT);
    Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
    return u64EffAddr & UINT32_MAX;
}
#endif /* IEM_WITH_SETJMP */
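For reference, the core of the 32-bit SIB computation above (effective address = base + index << scale, with index 4 meaning "no index") reduces to a few lines. The following stand-alone sketch is illustrative only, not code from the changeset; register values are passed as a plain array instead of being read from a CPUMCTX, and the mod=0/base=5 disp32 special case is left to the caller:

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal 32-bit SIB decode: EA = base + index * 2^scale. */
    static uint32_t demoCalcSibEffAddr(uint8_t bSib, const uint32_t auGprs[8])
    {
        uint8_t iScale = (bSib >> 6) & 3;   /* cf. X86_SIB_SCALE_SHIFT/SMASK */
        uint8_t iIndex = (bSib >> 3) & 7;   /* cf. X86_SIB_INDEX_SHIFT/SMASK */
        uint8_t iBase  = bSib & 7;          /* cf. X86_SIB_BASE_MASK         */

        uint32_t uEffAddr = iIndex == 4 ? 0 /* encoding 4 = no index register */
                          : auGprs[iIndex] << iScale;
        return uEffAddr + auGprs[iBase];
    }

    int main(void)
    {
        /* eax..edi in encoding order; ebx=0x1000, esi=0x20. */
        uint32_t auGprs[8] = { 0, 0, 0, 0x1000, 0, 0, 0x20, 0 };
        /* SIB 0xB3: scale=2 (x4), index=6 (esi), base=3 (ebx). */
        printf("%#x\n", demoCalcSibEffAddr(0xB3, auGprs)); /* 0x1000 + 0x20*4 = 0x1080 */
        return 0;
    }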

/** @} */
…
DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
{
#ifdef IEM_WITH_SETJMP
    VBOXSTRICTRC rcStrict;
    jmp_buf      JmpBuf;
    jmp_buf     *pSavedJmpBuf = pIemCpu->CTX_SUFF(pJmpBuf);
    pIemCpu->CTX_SUFF(pJmpBuf) = &JmpBuf;
    if ((rcStrict = setjmp(JmpBuf)) == 0)
    {
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }
    pIemCpu->CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
#else
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
#endif
    if (rcStrict == VINF_SUCCESS)
        pIemCpu->cInstructions++;
…
    if (rcStrict == VINF_SUCCESS)
    {
#ifdef LOG_ENABLED
        iemLogCurInstr(IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);
#endif
#ifdef IEM_WITH_SETJMP
        pIemCpu->CTX_SUFF(pJmpBuf) = &JmpBuf;
        if ((rcStrict = setjmp(JmpBuf)) == 0)
        {
            uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
            rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
        }
        pIemCpu->CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
#else
        IEM_OPCODE_GET_NEXT_U8(&b);
        rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
#endif
        if (rcStrict == VINF_SUCCESS)
            pIemCpu->cInstructions++;
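The jump-buffer bookkeeping above follows a strict save/install/restore discipline so that executions can nest; note how the inhibit-shadowed follow-up instruction reinstalls the same local buffer. A distilled, hypothetical version of that wrapper is sketched below (DemoCpu, demoDecodeAndRun and demoExecOne are stand-ins, not VirtualBox APIs):

    #include <setjmp.h>
    #include <stddef.h>

    /* The decoder may return normally or longjmp(*pCpu->pJmpBuf, rc). */
    typedef struct DemoCpu { jmp_buf *pJmpBuf; } DemoCpu;

    static int demoDecodeAndRun(DemoCpu *pCpu)
    {
        (void)pCpu;
        return 0;                              /* pretend the instruction succeeded */
    }

    static int demoExecOne(DemoCpu *pCpu)
    {
        jmp_buf  JmpBuf;
        jmp_buf *pSavedJmpBuf = pCpu->pJmpBuf; /* save whatever an outer caller installed */
        int      rc;

        pCpu->pJmpBuf = &JmpBuf;
        if ((rc = setjmp(JmpBuf)) == 0)
            rc = demoDecodeAndRun(pCpu);       /* normal return or longjmp back to setjmp */
        pCpu->pJmpBuf = pSavedJmpBuf;          /* always restore, error or not */
        return rc;
    }

    int main(void)
    {
        DemoCpu Cpu = { NULL };
        return demoExecOne(&Cpu);
    }

Restoring the saved buffer on every exit path is what keeps a nested or recursive invocation from leaving a dangling pointer to a dead stack frame.
-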
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
r61665 r61885
…
/** Invalid with RM byte. */
FNIEMOPRM_DEF(iemOp_InvalidWithRM)
{
    IEMOP_MNEMONIC("InvalidWithRM");
    return IEMOP_RAISE_INVALID_OPCODE();
}

/** @name ..... opcodes.
…
/** Opcode 0x0f 0x00 /0. */
FNIEMOPRM_DEF(iemOp_Grp6_sldt)
{
    IEMOP_MNEMONIC("sldt Rv/Mw");
…
/** Opcode 0x0f 0x00 /1. */
FNIEMOPRM_DEF(iemOp_Grp6_str)
{
    IEMOP_MNEMONIC("str Rv/Mw");
…
/** Opcode 0x0f 0x00 /2. */
FNIEMOPRM_DEF(iemOp_Grp6_lldt)
{
    IEMOP_MNEMONIC("lldt Ew");
…
/** Opcode 0x0f 0x00 /3. */
FNIEMOPRM_DEF(iemOp_Grp6_ltr)
{
    IEMOP_MNEMONIC("ltr Ew");
…
/** Opcode 0x0f 0x00 /4. */
FNIEMOPRM_DEF(iemOp_Grp6_verr)
{
    IEMOP_MNEMONIC("verr Ew");
…
/** Opcode 0x0f 0x00 /5. */
FNIEMOPRM_DEF(iemOp_Grp6_verw)
{
    IEMOP_MNEMONIC("verw Ew");
…

/**
 * Group 6 jump table.
 */
IEM_STATIC const PFNIEMOPRM g_apfnGroup6[8] =
{
    iemOp_Grp6_sldt,
    iemOp_Grp6_str,
    iemOp_Grp6_lldt,
    iemOp_Grp6_ltr,
    iemOp_Grp6_verr,
    iemOp_Grp6_verw,
    iemOp_InvalidWithRM,
    iemOp_InvalidWithRM
};

/** Opcode 0x0f 0x00. */
FNIEMOP_DEF(iemOp_Grp6)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    return FNIEMOP_CALL_1(g_apfnGroup6[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK], bRm);
}
…
IEM_STATIC const PFNIEMOP g_apfnTwoByteMap[256] =
{
    /* 0x00 */ iemOp_Grp6,
…
/** Used by iemOp_EscF1. */
IEM_STATIC const PFNIEMOP g_apfnEscF1_E0toFF[32] =
{
    /* 0xe0 */ iemOp_fchs,
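The switch-to-table conversion for group 6 can be shown in miniature. Everything named demo* below is invented for the sketch; only the indexing expression mirrors the real code:

    #include <stdint.h>
    #include <stdio.h>

    /* Dispatch on the ModR/M reg field (bits 5:3) through an 8-entry
       function-pointer table, with a shared "invalid" handler in the
       slots that have no instruction. */
    typedef int (*PFNDEMOOP)(uint8_t bRm);

    static int demoOpSldt(uint8_t bRm)    { printf("sldt, rm=%#x\n", bRm); return 0; }
    static int demoOpInvalid(uint8_t bRm) { (void)bRm; return -22; /* raise #UD */ }

    static const PFNDEMOOP g_apfnDemoGroup6[8] =
    {
        demoOpSldt,    demoOpInvalid, demoOpInvalid, demoOpInvalid,
        demoOpInvalid, demoOpInvalid, demoOpInvalid, demoOpInvalid
    };

    int main(void)
    {
        uint8_t bRm = 0xC0;                           /* mod=3, reg=0, rm=0 */
        return g_apfnDemoGroup6[(bRm >> 3) & 7](bRm); /* calls demoOpSldt */
    }

A table keeps the dispatch down to one indexed indirect call and lets the two invalid encodings (/6 and /7) share a single handler, which is what makes the new iemOp_InvalidWithRM worthwhile.
-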
trunk/src/VBox/VMM/include/IEMInternal.h
r60907 r61885
#include <VBox/vmm/stam.h>
#include <VBox/param.h>

#include <setjmp.h>
…
    /** Pointer to the CPU context - ring-3 context. */
    R3PTRTYPE(PCPUMCTX)     pCtxR3;
    /** Pointer to the setjmp buffer - ring-3 context. */
    R3PTRTYPE(jmp_buf *)    pJmpBufR3;
    /** Pointer to the CPU context - ring-0 context. */
    R0PTRTYPE(PCPUMCTX)     pCtxR0;
    /** Pointer to the setjmp buffer - ring-0 context. */
    R0PTRTYPE(jmp_buf *)    pJmpBufR0;
    /** Pointer to the CPU context - raw-mode context. */
    RCPTRTYPE(PCPUMCTX)     pCtxRC;
    /** Pointer to the setjmp buffer - raw-mode context. */
    RCPTRTYPE(jmp_buf *)    pJmpBufRC;

    /** Offset of the VMCPU structure relative to this structure (negative). */
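The three new members follow the same per-context pattern as the pCtx pointers: the structure is shared between ring-3, ring-0 and raw-mode code, and code compiled for each context must pick the pointer that is valid there. A rough sketch of the suffix-selection idea follows; the DEMO_* names are illustrative, and in VirtualBox the selection is done by the CTX_SUFF() macro family:

    #include <setjmp.h>

    typedef struct DemoCpu
    {
        jmp_buf *pJmpBufR3;   /* valid when compiled into the ring-3 module   */
        jmp_buf *pJmpBufR0;   /* valid when compiled into the ring-0 module   */
        jmp_buf *pJmpBufRC;   /* valid when compiled into the raw-mode module */
    } DemoCpu;

    #if defined(DEMO_IN_RING0)
    # define DEMO_CTX_SUFF(a_Name) a_Name##R0
    #elif defined(DEMO_IN_RC)
    # define DEMO_CTX_SUFF(a_Name) a_Name##RC
    #else
    # define DEMO_CTX_SUFF(a_Name) a_Name##R3
    #endif

    /* All three contexts compile this same line; the preprocessor pastes
       the suffix and picks the member that is mapped in this context. */
    jmp_buf *demoGetJmpBuf(DemoCpu *pCpu)
    {
        return pCpu->DEMO_CTX_SUFF(pJmpBuf);
    }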