- Timestamp: Jul 11, 2016 6:30:07 PM (9 years ago)
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r62138 r62171 82 82 //#define IEM_LOG_MEMORY_WRITES 83 83 #define IEM_IMPLEMENTS_TASKSWITCH 84 //#define IEM_WITH_CODE_TLB - work in progress 84 85 85 86 … … 733 734 IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc); 734 735 IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu); 736 #ifdef IEM_WITH_SETJMP 737 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu); 738 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess); 739 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel); 740 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess); 741 #endif 742 735 743 IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess); 736 744 IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess); … … 859 867 pVCpu->iem.s.uRexIndex = 127; 860 868 pVCpu->iem.s.iEffSeg = 127; 869 pVCpu->iem.s.uFpuOpcode = UINT16_MAX; 870 # ifdef IEM_WITH_CODE_TLB 871 pVCpu->iem.s.offInstrNextByte = UINT16_MAX; 872 pVCpu->iem.s.pbInstrBuf = NULL; 873 pVCpu->iem.s.cbInstrBuf = UINT16_MAX; 874 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX; 875 pVCpu->iem.s.offCurInstrStart = UINT16_MAX; 876 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff); 877 # else 861 878 pVCpu->iem.s.offOpcode = 127; 862 879 pVCpu->iem.s.cbOpcode = 127; 880 # endif 863 881 #endif 864 882 … … 896 914 #endif 897 915 #ifdef VBOX_STRICT 916 # ifdef IEM_WITH_CODE_TLB 917 # else 898 918 pVCpu->iem.s.cbOpcode = 0; 919 # endif 899 920 #else 900 921 NOREF(pVCpu); … … 956 977 pVCpu->iem.s.uRexIndex = 0; 957 978 pVCpu->iem.s.iEffSeg = X86_SREG_DS; 979 #ifdef IEM_WITH_CODE_TLB 980 pVCpu->iem.s.pbInstrBuf = NULL; 981 pVCpu->iem.s.offInstrNextByte = 0; 982 pVCpu->iem.s.offCurInstrStart = 0; 983 # ifdef VBOX_STRICT 984 pVCpu->iem.s.cbInstrBuf = UINT16_MAX; 985 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX; 986 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff); 987 # endif 988 #else 958 989 pVCpu->iem.s.offOpcode = 0; 959 990 pVCpu->iem.s.cbOpcode = 0; 991 #endif 960 992 pVCpu->iem.s.cActiveMappings = 0; 961 993 pVCpu->iem.s.iNextMapping = 0; … … 1036 1068 pVCpu->iem.s.uRexIndex = 0; 1037 1069 pVCpu->iem.s.iEffSeg = X86_SREG_DS; 1038 // busted and need rewrite: 1039 //if (pVCpu->iem.s.cbOpcode > pVCpu->iem.s.offOpcode) /* No need to check RIP here because branch instructions will update cbOpcode. */ 1040 //{ 1041 // pVCpu->iem.s.cbOpcode -= pVCpu->iem.s.offOpcode; 1042 // memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode], pVCpu->iem.s.cbOpcode); 1043 //} 1044 //else 1045 pVCpu->iem.s.cbOpcode = 0; 1070 #ifdef IEM_WITH_CODE_TLB 1071 if (pVCpu->iem.s.pbInstrBuf) 1072 { 1073 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? 
pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base) 1074 - pVCpu->iem.s.uInstrBufPc; 1075 if (off < pVCpu->iem.s.cbInstrBufTotal) 1076 { 1077 pVCpu->iem.s.offInstrNextByte = (uint32_t)off; 1078 pVCpu->iem.s.offCurInstrStart = (uint16_t)off; 1079 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal) 1080 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15; 1081 else 1082 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal; 1083 } 1084 else 1085 { 1086 pVCpu->iem.s.pbInstrBuf = NULL; 1087 pVCpu->iem.s.offInstrNextByte = 0; 1088 pVCpu->iem.s.offCurInstrStart = 0; 1089 } 1090 } 1091 else 1092 { 1093 pVCpu->iem.s.offInstrNextByte = 0; 1094 pVCpu->iem.s.offCurInstrStart = 0; 1095 } 1096 #else 1097 pVCpu->iem.s.cbOpcode = 0; 1046 1098 pVCpu->iem.s.offOpcode = 0; 1099 #endif 1047 1100 Assert(pVCpu->iem.s.cActiveMappings == 0); 1048 1101 pVCpu->iem.s.iNextMapping = 0; … … 1095 1148 #endif 1096 1149 iemInitDecoder(pVCpu, fBypassHandlers); 1150 1151 #ifdef IEM_WITH_CODE_TLB 1152 /** @todo Do ITLB lookup here. */ 1153 1154 #else /* !IEM_WITH_CODE_TLB */ 1097 1155 1098 1156 /* … … 1128 1186 } 1129 1187 1130 # ifdef VBOX_WITH_RAW_MODE_NOT_R01188 # ifdef VBOX_WITH_RAW_MODE_NOT_R0 1131 1189 /* Allow interpretation of patch manager code blocks since they can for 1132 1190 instance throw #PFs for perfectly good reasons. */ … … 1139 1197 return VINF_SUCCESS; 1140 1198 } 1141 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */1199 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ 1142 1200 1143 1201 RTGCPHYS GCPhys; … … 1164 1222 * TLB... */ 1165 1223 1166 # ifdef IEM_VERIFICATION_MODE_FULL1224 # ifdef IEM_VERIFICATION_MODE_FULL 1167 1225 /* 1168 1226 * Optimistic optimization: Use unconsumed opcode bytes from the previous … … 1181 1239 return VINF_SUCCESS; 1182 1240 } 1183 # endif1241 # endif 1184 1242 1185 1243 /* … … 1187 1245 */ 1188 1246 PVM pVM = pVCpu->CTX_SUFF(pVM); 1189 # if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)1247 # if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0) 1190 1248 size_t cbActual; 1191 1249 if ( PATMIsEnabled(pVM) … … 1197 1255 } 1198 1256 else 1199 # endif1257 # endif 1200 1258 { 1201 1259 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK); … … 1239 1297 pVCpu->iem.s.cbOpcode = cbToTryRead; 1240 1298 } 1241 1299 #endif /* !IEM_WITH_CODE_TLB */ 1242 1300 return VINF_SUCCESS; 1243 1301 } 1244 1302 1245 1303 1246 /** 1247 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate 1248 * exception if it fails. 1249 * 1250 * @returns Strict VBox status code. 1304 #ifdef IEM_WITH_CODE_TLB 1305 1306 /** 1307 * Tries to fetches @a cbDst opcode bytes, raise the appropriate exception on 1308 * failure and jumps. 1309 * 1310 * We end up here for a number of reasons: 1311 * - pbInstrBuf isn't yet initialized. 1312 * - Advancing beyond the buffer boundrary (e.g. cross page). 1313 * - Advancing beyond the CS segment limit. 1314 * - Fetching from non-mappable page (e.g. MMIO). 1315 * 1251 1316 * @param pVCpu The cross context virtual CPU structure of the 1252 1317 * calling thread. 1253 * @param cbMin The minimum number of bytes relative offOpcode 1254 * that must be read. 1255 */ 1256 IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin) 1257 { 1318 * @param pvDst Where to return the bytes. 1319 * @param cbDst Number of bytes to read. 1320 * 1321 * @todo Make cbDst = 0 a way of initializing pbInstrBuf? 
1322 */ 1323 IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst) 1324 { 1325 Assert(cbDst <= 8); 1326 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte; 1327 1328 /* 1329 * We might have a partial buffer match, deal with that first to make the 1330 * rest simpler. This is the first part of the cross page/buffer case. 1331 */ 1332 if (pVCpu->iem.s.pbInstrBuf != NULL) 1333 { 1334 if (offBuf < pVCpu->iem.s.cbInstrBuf) 1335 { 1336 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf); 1337 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte; 1338 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy); 1339 1340 cbDst -= cbCopy; 1341 pvDst = (uint8_t *)pvDst + cbCopy; 1342 offBuf += cbCopy; 1343 pVCpu->iem.s.offInstrNextByte += offBuf; 1344 } 1345 } 1346 1347 /* 1348 * Check segment limit, figuring how much we're allowed to access at this point. 1349 */ 1350 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 1351 RTGCPTR GCPtrFirst; 1352 uint32_t cbMaxRead; 1353 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 1354 { 1355 GCPtrFirst = pCtx->rip + (offBuf - pVCpu->iem.s.offCurInstrStart); 1356 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst))) 1357 { /* likely */ } 1358 else 1359 iemRaiseGeneralProtectionFault0Jmp(pVCpu); 1360 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK); 1361 } 1362 else 1363 { 1364 GCPtrFirst = pCtx->eip + (offBuf - pVCpu->iem.s.offCurInstrStart); 1365 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); 1366 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit)) 1367 { /* likely */ } 1368 else 1369 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); 1370 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1; 1371 if (cbMaxRead != 0) 1372 { /* likely */ } 1373 else 1374 { 1375 /* Overflowed because address is 0 and limit is max. */ 1376 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX); 1377 cbMaxRead = X86_PAGE_SIZE; 1378 } 1379 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base; 1380 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK); 1381 if (cbMaxRead2 < cbMaxRead) 1382 cbMaxRead = cbMaxRead2; 1383 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */ 1384 } 1385 1386 /* 1387 * Try use the code TLB to translate the address. 1388 */ 1389 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision; 1390 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256); 1391 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag]; 1392 if (pTlbe->uTag == uTag) 1393 { 1394 1395 } 1396 1397 1398 1258 1399 /* 1259 1400 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap. … … 1261 1402 * First translate CS:rIP to a physical address. 1262 1403 */ 1263 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 1404 # if 0 /** @todo later */ 1264 1405 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin); 1265 1406 uint32_t cbToTryRead; … … 1301 1442 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */ 1302 1443 1303 # ifdef VBOX_WITH_RAW_MODE_NOT_R01444 # ifdef VBOX_WITH_RAW_MODE_NOT_R0 1304 1445 /* Allow interpretation of patch manager code blocks since they can for 1305 1446 instance throw #PFs for perfectly good reasons. 
*/ … … 1312 1453 return VINF_SUCCESS; 1313 1454 } 1314 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */1455 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ 1315 1456 1316 1457 RTGCPHYS GCPhys; … … 1379 1520 pVCpu->iem.s.cbOpcode += cbToTryRead; 1380 1521 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode)); 1522 # endif 1523 } 1524 1525 #else 1526 1527 /** 1528 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate 1529 * exception if it fails. 1530 * 1531 * @returns Strict VBox status code. 1532 * @param pVCpu The cross context virtual CPU structure of the 1533 * calling thread. 1534 * @param cbMin The minimum number of bytes relative offOpcode 1535 * that must be read. 1536 */ 1537 IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin) 1538 { 1539 /* 1540 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap. 1541 * 1542 * First translate CS:rIP to a physical address. 1543 */ 1544 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 1545 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin); 1546 uint32_t cbToTryRead; 1547 RTGCPTR GCPtrNext; 1548 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 1549 { 1550 cbToTryRead = PAGE_SIZE; 1551 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode; 1552 if (!IEM_IS_CANONICAL(GCPtrNext)) 1553 return iemRaiseGeneralProtectionFault0(pVCpu); 1554 } 1555 else 1556 { 1557 uint32_t GCPtrNext32 = pCtx->eip; 1558 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); 1559 GCPtrNext32 += pVCpu->iem.s.cbOpcode; 1560 if (GCPtrNext32 > pCtx->cs.u32Limit) 1561 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); 1562 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1; 1563 if (!cbToTryRead) /* overflowed */ 1564 { 1565 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX); 1566 cbToTryRead = UINT32_MAX; 1567 /** @todo check out wrapping around the code segment. */ 1568 } 1569 if (cbToTryRead < cbMin - cbLeft) 1570 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); 1571 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32; 1572 } 1573 1574 /* Only read up to the end of the page, and make sure we don't read more 1575 than the opcode buffer can hold. */ 1576 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK); 1577 if (cbToTryRead > cbLeftOnPage) 1578 cbToTryRead = cbLeftOnPage; 1579 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode) 1580 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode; 1581 /** @todo r=bird: Convert assertion into undefined opcode exception? */ 1582 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */ 1583 1584 # ifdef VBOX_WITH_RAW_MODE_NOT_R0 1585 /* Allow interpretation of patch manager code blocks since they can for 1586 instance throw #PFs for perfectly good reasons. 
*/ 1587 if (pVCpu->iem.s.fInPatchCode) 1588 { 1589 size_t cbRead = 0; 1590 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead); 1591 AssertRCReturn(rc, rc); 1592 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0); 1593 return VINF_SUCCESS; 1594 } 1595 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ 1596 1597 RTGCPHYS GCPhys; 1598 uint64_t fFlags; 1599 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys); 1600 if (RT_FAILURE(rc)) 1601 { 1602 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc)); 1603 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc); 1604 } 1605 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3) 1606 { 1607 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext)); 1608 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1609 } 1610 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE)) 1611 { 1612 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext)); 1613 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1614 } 1615 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK; 1616 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode)); 1617 /** @todo Check reserved bits and such stuff. PGM is better at doing 1618 * that, so do it when implementing the guest virtual address 1619 * TLB... */ 1620 1621 /* 1622 * Read the bytes at this address. 1623 * 1624 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already, 1625 * and since PATM should only patch the start of an instruction there 1626 * should be no need to check again here. 1627 */ 1628 if (!pVCpu->iem.s.fBypassHandlers) 1629 { 1630 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], 1631 cbToTryRead, PGMACCESSORIGIN_IEM); 1632 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 1633 { /* likely */ } 1634 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict)) 1635 { 1636 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n", 1637 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead)); 1638 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 1639 } 1640 else 1641 { 1642 Log((RT_SUCCESS(rcStrict) 1643 ? 
"iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n" 1644 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n", 1645 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead)); 1646 return rcStrict; 1647 } 1648 } 1649 else 1650 { 1651 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead); 1652 if (RT_SUCCESS(rc)) 1653 { /* likely */ } 1654 else 1655 { 1656 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc)); 1657 return rc; 1658 } 1659 } 1660 pVCpu->iem.s.cbOpcode += cbToTryRead; 1661 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode)); 1381 1662 1382 1663 return VINF_SUCCESS; 1383 1664 } 1384 1665 1666 #endif /* !IEM_WITH_CODE_TLB */ 1385 1667 #ifndef IEM_WITH_SETJMP 1386 1668 … … 1438 1720 DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu) 1439 1721 { 1722 # ifdef IEM_WITH_CODE_TLB 1723 uint8_t u8; 1724 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8); 1725 return u8; 1726 # else 1440 1727 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1); 1441 1728 if (rcStrict == VINF_SUCCESS) 1442 1729 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++]; 1443 1730 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 1731 # endif 1444 1732 } 1445 1733 … … 1453 1741 DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu) 1454 1742 { 1743 # ifdef IEM_WITH_CODE_TLB 1744 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte; 1745 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf; 1746 if (RT_LIKELY( pbBuf != NULL 1747 && offBuf < pVCpu->iem.s.cbInstrBuf)) 1748 { 1749 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1; 1750 return pbBuf[offBuf]; 1751 } 1752 # else 1455 1753 uintptr_t offOpcode = pVCpu->iem.s.offOpcode; 1456 1754 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode)) … … 1459 1757 return pVCpu->iem.s.abOpcode[offOpcode]; 1460 1758 } 1759 # endif 1461 1760 return iemOpcodeGetNextU8SlowJmp(pVCpu); 1462 1761 } … … 1760 2059 DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu) 1761 2060 { 2061 # ifdef IEM_WITH_CODE_TLB 2062 uint16_t u16; 2063 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16); 2064 return u16; 2065 # else 1762 2066 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2); 1763 2067 if (rcStrict == VINF_SUCCESS) … … 1765 2069 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 1766 2070 pVCpu->iem.s.offOpcode += 2; 1767 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2071 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 1768 2072 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 2073 # else 2074 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 2075 # endif 2076 } 2077 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 2078 # endif 2079 } 2080 2081 2082 /** 2083 * Fetches the next opcode word, longjmp on error. 2084 * 2085 * @returns The opcode word. 2086 * @param pVCpu The cross context virtual CPU structure of the calling thread. 
2087 */ 2088 DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu) 2089 { 2090 # ifdef IEM_WITH_CODE_TLB 2091 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte; 2092 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf; 2093 if (RT_LIKELY( pbBuf != NULL 2094 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf)) 2095 { 2096 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2; 2097 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 2098 return *(uint16_t const *)&pbBuf[offBuf]; 2099 # else 2100 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]); 2101 # endif 2102 } 1769 2103 # else 1770 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);1771 # endif1772 }1773 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));1774 }1775 1776 1777 /**1778 * Fetches the next opcode word, longjmp on error.1779 *1780 * @returns The opcode word.1781 * @param pVCpu The cross context virtual CPU structure of the calling thread.1782 */1783 DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)1784 {1785 2104 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 1786 2105 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode)) 1787 2106 { 1788 2107 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2; 1789 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2108 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 1790 2109 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 1791 # else2110 # else 1792 2111 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 2112 # endif 2113 } 1793 2114 # endif 1794 }1795 2115 return iemOpcodeGetNextU16SlowJmp(pVCpu); 1796 2116 } … … 2046 2366 DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu) 2047 2367 { 2368 # ifdef IEM_WITH_CODE_TLB 2369 uint32_t u32; 2370 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32); 2371 return u32; 2372 # else 2048 2373 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4); 2049 2374 if (rcStrict == VINF_SUCCESS) … … 2051 2376 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 2052 2377 pVCpu->iem.s.offOpcode = offOpcode + 4; 2053 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2378 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 2054 2379 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 2055 # else2380 # else 2056 2381 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2057 2382 pVCpu->iem.s.abOpcode[offOpcode + 1], 2058 2383 pVCpu->iem.s.abOpcode[offOpcode + 2], 2059 2384 pVCpu->iem.s.abOpcode[offOpcode + 3]); 2385 # endif 2386 } 2387 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 2060 2388 # endif 2061 }2062 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));2063 2389 } 2064 2390 … … 2072 2398 DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu) 2073 2399 { 2400 # ifdef IEM_WITH_CODE_TLB 2401 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte; 2402 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf; 2403 if (RT_LIKELY( pbBuf != NULL 2404 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf)) 2405 { 2406 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4; 2407 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 2408 return *(uint32_t const *)&pbBuf[offBuf]; 2409 # else 2410 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf], 2411 pbBuf[offBuf + 1], 2412 pbBuf[offBuf + 2], 2413 pbBuf[offBuf + 3]); 2414 # endif 2415 } 2416 # else 2074 2417 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 2075 2418 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode)) 2076 2419 { 2077 2420 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4; 2078 # ifdef 
IEM_USE_UNALIGNED_DATA_ACCESS2421 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 2079 2422 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 2080 # else2423 # else 2081 2424 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2082 2425 pVCpu->iem.s.abOpcode[offOpcode + 1], 2083 2426 pVCpu->iem.s.abOpcode[offOpcode + 2], 2084 2427 pVCpu->iem.s.abOpcode[offOpcode + 3]); 2428 # endif 2429 } 2085 2430 # endif 2086 }2087 2431 return iemOpcodeGetNextU32SlowJmp(pVCpu); 2088 2432 } … … 2359 2703 DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu) 2360 2704 { 2705 # ifdef IEM_WITH_CODE_TLB 2706 uint64_t u64; 2707 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64); 2708 return u64; 2709 # else 2361 2710 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8); 2362 2711 if (rcStrict == VINF_SUCCESS) … … 2364 2713 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 2365 2714 pVCpu->iem.s.offOpcode = offOpcode + 8; 2366 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2715 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 2367 2716 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 2368 # else2717 # else 2369 2718 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2370 2719 pVCpu->iem.s.abOpcode[offOpcode + 1], … … 2375 2724 pVCpu->iem.s.abOpcode[offOpcode + 6], 2376 2725 pVCpu->iem.s.abOpcode[offOpcode + 7]); 2726 # endif 2727 } 2728 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 2377 2729 # endif 2378 }2379 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));2380 2730 } 2381 2731 … … 2389 2739 DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu) 2390 2740 { 2741 # ifdef IEM_WITH_CODE_TLB 2742 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte; 2743 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf; 2744 if (RT_LIKELY( pbBuf != NULL 2745 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf)) 2746 { 2747 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8; 2748 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 2749 return *(uint64_t const *)&pbBuf[offBuf]; 2750 # else 2751 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf], 2752 pbBuf[offBuf + 1], 2753 pbBuf[offBuf + 2], 2754 pbBuf[offBuf + 3], 2755 pbBuf[offBuf + 4], 2756 pbBuf[offBuf + 5], 2757 pbBuf[offBuf + 6], 2758 pbBuf[offBuf + 7]); 2759 # endif 2760 } 2761 # else 2391 2762 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 2392 2763 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode)) 2393 2764 { 2394 2765 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8; 2395 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2766 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 2396 2767 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 2397 # else2768 # else 2398 2769 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2399 2770 pVCpu->iem.s.abOpcode[offOpcode + 1], … … 2404 2775 pVCpu->iem.s.abOpcode[offOpcode + 6], 2405 2776 pVCpu->iem.s.abOpcode[offOpcode + 7]); 2777 # endif 2778 } 2406 2779 # endif 2407 }2408 2780 return iemOpcodeGetNextU64SlowJmp(pVCpu); 2409 2781 } … … 4420 4792 #endif 4421 4793 4794 #ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */ 4422 4795 /* 4423 4796 * Flush prefetch buffer 4424 4797 */ 4425 4798 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 4799 #endif 4426 4800 4427 4801 /* … … 4531 4905 4532 4906 /* Flush the prefetch buffer. 
*/ 4533 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 4907 #ifdef IEM_WITH_CODE_TLB 4908 pVCpu->iem.s.pbInstrBuf = NULL; 4909 #else 4910 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu); 4911 #endif 4534 4912 4535 4913 /* … … 5273 5651 case IEMMODE_16BIT: 5274 5652 { 5275 uint16_t uNewIp = pCtx->ip + offNextInstr + pVCpu->iem.s.offOpcode;5653 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 5276 5654 if ( uNewIp > pCtx->cs.u32Limit 5277 5655 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */ … … 5286 5664 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 5287 5665 5288 uint32_t uNewEip = pCtx->eip + offNextInstr + pVCpu->iem.s.offOpcode;5666 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 5289 5667 if (uNewEip > pCtx->cs.u32Limit) 5290 5668 return iemRaiseGeneralProtectionFault0(pVCpu); … … 5297 5675 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); 5298 5676 5299 uint64_t uNewRip = pCtx->rip + offNextInstr + pVCpu->iem.s.offOpcode;5677 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 5300 5678 if (!IEM_IS_CANONICAL(uNewRip)) 5301 5679 return iemRaiseGeneralProtectionFault0(pVCpu); … … 5309 5687 pCtx->eflags.Bits.u1RF = 0; 5310 5688 5689 #ifndef IEM_WITH_CODE_TLB 5311 5690 /* Flush the prefetch buffer. */ 5312 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 5691 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu); 5692 #endif 5313 5693 5314 5694 return VINF_SUCCESS; … … 5331 5711 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT); 5332 5712 5333 uint16_t uNewIp = pCtx->ip + offNextInstr + pVCpu->iem.s.offOpcode;5713 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 5334 5714 if ( uNewIp > pCtx->cs.u32Limit 5335 5715 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */ … … 5339 5719 pCtx->eflags.Bits.u1RF = 0; 5340 5720 5721 #ifndef IEM_WITH_CODE_TLB 5341 5722 /* Flush the prefetch buffer. */ 5342 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 5723 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu); 5724 #endif 5343 5725 5344 5726 return VINF_SUCCESS; … … 5365 5747 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 5366 5748 5367 uint32_t uNewEip = pCtx->eip + offNextInstr + pVCpu->iem.s.offOpcode;5749 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 5368 5750 if (uNewEip > pCtx->cs.u32Limit) 5369 5751 return iemRaiseGeneralProtectionFault0(pVCpu); … … 5374 5756 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); 5375 5757 5376 uint64_t uNewRip = pCtx->rip + offNextInstr + pVCpu->iem.s.offOpcode;5758 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 5377 5759 if (!IEM_IS_CANONICAL(uNewRip)) 5378 5760 return iemRaiseGeneralProtectionFault0(pVCpu); … … 5381 5763 pCtx->eflags.Bits.u1RF = 0; 5382 5764 5765 #ifndef IEM_WITH_CODE_TLB 5383 5766 /* Flush the prefetch buffer. */ 5384 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 5767 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu); 5768 #endif 5385 5769 5386 5770 return VINF_SUCCESS; … … 5440 5824 pCtx->eflags.Bits.u1RF = 0; 5441 5825 5826 #ifndef IEM_WITH_CODE_TLB 5442 5827 /* Flush the prefetch buffer. 
*/ 5443 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 5828 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu); 5829 #endif 5444 5830 5445 5831 return VINF_SUCCESS; … … 5504 5890 IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu) 5505 5891 { 5506 return iemRegAddToRipKeepRF(pVCpu, pVCpu->iem.s.offOpcode);5892 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu)); 5507 5893 } 5508 5894 #endif … … 5546 5932 IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu) 5547 5933 { 5548 return iemRegAddToRipAndClearRF(pVCpu, pVCpu->iem.s.offOpcode);5934 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu)); 5549 5935 } 5550 5936 … … 5875 6261 DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx) 5876 6262 { 5877 pFpuCtx->FOP = pVCpu->iem.s.abOpcode[pVCpu->iem.s.offFpuOpcode]5878 | ((uint16_t)(pVCpu->iem.s.abOpcode[pVCpu->iem.s.offFpuOpcode - 1] & 0x7) << 8);6263 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX); 6264 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode; 5879 6265 /** @todo x87.CS and FPUIP needs to be kept seperately. */ 5880 6266 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) … … 10389 10775 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0. 10390 10776 */ 10391 #define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode)10777 #define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)) 10392 10778 10393 10779 /** … … 10398 10784 * @param a0 The argument. 10399 10785 */ 10400 #define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0)10786 #define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0) 10401 10787 10402 10788 /** … … 10408 10794 * @param a1 The second extra argument. 10409 10795 */ 10410 #define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1)10796 #define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1) 10411 10797 10412 10798 /** … … 10419 10805 * @param a2 The third extra argument. 10420 10806 */ 10421 #define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1, a2)10807 #define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2) 10422 10808 10423 10809 /** … … 10431 10817 * @param a3 The fourth extra argument. 10432 10818 */ 10433 #define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1, a2, a3)10819 #define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3) 10434 10820 10435 10821 /** … … 10444 10830 * @param a4 The fifth extra argument. 10445 10831 */ 10446 #define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1, a2, a3, a4)10832 #define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4) 10447 10833 10448 10834 /** … … 10455 10841 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0. 10456 10842 */ 10457 #define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode)10843 #define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)) 10458 10844 10459 10845 /** … … 10466 10852 * @param a0 The argument. 
10467 10853 */ 10468 #define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0)10854 #define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0) 10469 10855 10470 10856 /** … … 10478 10864 * @param a1 The second extra argument. 10479 10865 */ 10480 #define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1)10866 #define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1) 10481 10867 10482 10868 /** … … 10491 10877 * @param a2 The third extra argument. 10492 10878 */ 10493 #define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1, a2)10879 #define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2) 10494 10880 10495 10881 /** … … 10968 11354 } \ 10969 11355 } while (0) 11356 10970 11357 /** 10971 11358 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes … … 11151 11538 { 11152 11539 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr); 11153 u64EffAddr += pCtx->rip + pVCpu->iem.s.offOpcode+ cbImm;11540 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm; 11154 11541 } 11155 11542 else … … 11283 11670 11284 11671 11672 /** 11673 * Calculates the effective address of a ModR/M memory operand. 11674 * 11675 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR. 11676 * 11677 * @return Strict VBox status code. 11678 * @param pVCpu The cross context virtual CPU structure of the calling thread. 11679 * @param bRm The ModRM byte. 11680 * @param cbImm The size of any immediate following the 11681 * effective address opcode bytes. Important for 11682 * RIP relative addressing. 11683 * @param pGCPtrEff Where to return the effective address. 11684 * @param offRsp RSP displacement. 11685 */ 11686 IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) 11687 { 11688 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm)); 11689 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 11690 # define SET_SS_DEF() \ 11691 do \ 11692 { \ 11693 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \ 11694 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \ 11695 } while (0) 11696 11697 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) 11698 { 11699 /** @todo Check the effective address size crap! */ 11700 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT) 11701 { 11702 uint16_t u16EffAddr; 11703 11704 /* Handle the disp16 form with no registers first. */ 11705 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6) 11706 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); 11707 else 11708 { 11709 /* Get the displacment. */ 11710 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) 11711 { 11712 case 0: u16EffAddr = 0; break; 11713 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break; 11714 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break; 11715 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */ 11716 } 11717 11718 /* Add the base and index registers to the disp. 
*/ 11719 switch (bRm & X86_MODRM_RM_MASK) 11720 { 11721 case 0: u16EffAddr += pCtx->bx + pCtx->si; break; 11722 case 1: u16EffAddr += pCtx->bx + pCtx->di; break; 11723 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break; 11724 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break; 11725 case 4: u16EffAddr += pCtx->si; break; 11726 case 5: u16EffAddr += pCtx->di; break; 11727 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break; 11728 case 7: u16EffAddr += pCtx->bx; break; 11729 } 11730 } 11731 11732 *pGCPtrEff = u16EffAddr; 11733 } 11734 else 11735 { 11736 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT); 11737 uint32_t u32EffAddr; 11738 11739 /* Handle the disp32 form with no registers first. */ 11740 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5) 11741 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr); 11742 else 11743 { 11744 /* Get the register (or SIB) value. */ 11745 switch ((bRm & X86_MODRM_RM_MASK)) 11746 { 11747 case 0: u32EffAddr = pCtx->eax; break; 11748 case 1: u32EffAddr = pCtx->ecx; break; 11749 case 2: u32EffAddr = pCtx->edx; break; 11750 case 3: u32EffAddr = pCtx->ebx; break; 11751 case 4: /* SIB */ 11752 { 11753 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib); 11754 11755 /* Get the index and scale it. */ 11756 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) 11757 { 11758 case 0: u32EffAddr = pCtx->eax; break; 11759 case 1: u32EffAddr = pCtx->ecx; break; 11760 case 2: u32EffAddr = pCtx->edx; break; 11761 case 3: u32EffAddr = pCtx->ebx; break; 11762 case 4: u32EffAddr = 0; /*none */ break; 11763 case 5: u32EffAddr = pCtx->ebp; break; 11764 case 6: u32EffAddr = pCtx->esi; break; 11765 case 7: u32EffAddr = pCtx->edi; break; 11766 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11767 } 11768 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK; 11769 11770 /* add base */ 11771 switch (bSib & X86_SIB_BASE_MASK) 11772 { 11773 case 0: u32EffAddr += pCtx->eax; break; 11774 case 1: u32EffAddr += pCtx->ecx; break; 11775 case 2: u32EffAddr += pCtx->edx; break; 11776 case 3: u32EffAddr += pCtx->ebx; break; 11777 case 4: 11778 u32EffAddr += pCtx->esp + offRsp; 11779 SET_SS_DEF(); 11780 break; 11781 case 5: 11782 if ((bRm & X86_MODRM_MOD_MASK) != 0) 11783 { 11784 u32EffAddr += pCtx->ebp; 11785 SET_SS_DEF(); 11786 } 11787 else 11788 { 11789 uint32_t u32Disp; 11790 IEM_OPCODE_GET_NEXT_U32(&u32Disp); 11791 u32EffAddr += u32Disp; 11792 } 11793 break; 11794 case 6: u32EffAddr += pCtx->esi; break; 11795 case 7: u32EffAddr += pCtx->edi; break; 11796 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11797 } 11798 break; 11799 } 11800 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break; 11801 case 6: u32EffAddr = pCtx->esi; break; 11802 case 7: u32EffAddr = pCtx->edi; break; 11803 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11804 } 11805 11806 /* Get and add the displacement. 
*/ 11807 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) 11808 { 11809 case 0: 11810 break; 11811 case 1: 11812 { 11813 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp); 11814 u32EffAddr += i8Disp; 11815 break; 11816 } 11817 case 2: 11818 { 11819 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp); 11820 u32EffAddr += u32Disp; 11821 break; 11822 } 11823 default: 11824 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */ 11825 } 11826 11827 } 11828 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT) 11829 *pGCPtrEff = u32EffAddr; 11830 else 11831 { 11832 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT); 11833 *pGCPtrEff = u32EffAddr & UINT16_MAX; 11834 } 11835 } 11836 } 11837 else 11838 { 11839 uint64_t u64EffAddr; 11840 11841 /* Handle the rip+disp32 form with no registers first. */ 11842 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5) 11843 { 11844 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr); 11845 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm; 11846 } 11847 else 11848 { 11849 /* Get the register (or SIB) value. */ 11850 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB) 11851 { 11852 case 0: u64EffAddr = pCtx->rax; break; 11853 case 1: u64EffAddr = pCtx->rcx; break; 11854 case 2: u64EffAddr = pCtx->rdx; break; 11855 case 3: u64EffAddr = pCtx->rbx; break; 11856 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break; 11857 case 6: u64EffAddr = pCtx->rsi; break; 11858 case 7: u64EffAddr = pCtx->rdi; break; 11859 case 8: u64EffAddr = pCtx->r8; break; 11860 case 9: u64EffAddr = pCtx->r9; break; 11861 case 10: u64EffAddr = pCtx->r10; break; 11862 case 11: u64EffAddr = pCtx->r11; break; 11863 case 13: u64EffAddr = pCtx->r13; break; 11864 case 14: u64EffAddr = pCtx->r14; break; 11865 case 15: u64EffAddr = pCtx->r15; break; 11866 /* SIB */ 11867 case 4: 11868 case 12: 11869 { 11870 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib); 11871 11872 /* Get the index and scale it. 
*/ 11873 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex) 11874 { 11875 case 0: u64EffAddr = pCtx->rax; break; 11876 case 1: u64EffAddr = pCtx->rcx; break; 11877 case 2: u64EffAddr = pCtx->rdx; break; 11878 case 3: u64EffAddr = pCtx->rbx; break; 11879 case 4: u64EffAddr = 0; /*none */ break; 11880 case 5: u64EffAddr = pCtx->rbp; break; 11881 case 6: u64EffAddr = pCtx->rsi; break; 11882 case 7: u64EffAddr = pCtx->rdi; break; 11883 case 8: u64EffAddr = pCtx->r8; break; 11884 case 9: u64EffAddr = pCtx->r9; break; 11885 case 10: u64EffAddr = pCtx->r10; break; 11886 case 11: u64EffAddr = pCtx->r11; break; 11887 case 12: u64EffAddr = pCtx->r12; break; 11888 case 13: u64EffAddr = pCtx->r13; break; 11889 case 14: u64EffAddr = pCtx->r14; break; 11890 case 15: u64EffAddr = pCtx->r15; break; 11891 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11892 } 11893 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK; 11894 11895 /* add base */ 11896 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB) 11897 { 11898 case 0: u64EffAddr += pCtx->rax; break; 11899 case 1: u64EffAddr += pCtx->rcx; break; 11900 case 2: u64EffAddr += pCtx->rdx; break; 11901 case 3: u64EffAddr += pCtx->rbx; break; 11902 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break; 11903 case 6: u64EffAddr += pCtx->rsi; break; 11904 case 7: u64EffAddr += pCtx->rdi; break; 11905 case 8: u64EffAddr += pCtx->r8; break; 11906 case 9: u64EffAddr += pCtx->r9; break; 11907 case 10: u64EffAddr += pCtx->r10; break; 11908 case 11: u64EffAddr += pCtx->r11; break; 11909 case 12: u64EffAddr += pCtx->r12; break; 11910 case 14: u64EffAddr += pCtx->r14; break; 11911 case 15: u64EffAddr += pCtx->r15; break; 11912 /* complicated encodings */ 11913 case 5: 11914 case 13: 11915 if ((bRm & X86_MODRM_MOD_MASK) != 0) 11916 { 11917 if (!pVCpu->iem.s.uRexB) 11918 { 11919 u64EffAddr += pCtx->rbp; 11920 SET_SS_DEF(); 11921 } 11922 else 11923 u64EffAddr += pCtx->r13; 11924 } 11925 else 11926 { 11927 uint32_t u32Disp; 11928 IEM_OPCODE_GET_NEXT_U32(&u32Disp); 11929 u64EffAddr += (int32_t)u32Disp; 11930 } 11931 break; 11932 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11933 } 11934 break; 11935 } 11936 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11937 } 11938 11939 /* Get and add the displacement. 
*/ 11940 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) 11941 { 11942 case 0: 11943 break; 11944 case 1: 11945 { 11946 int8_t i8Disp; 11947 IEM_OPCODE_GET_NEXT_S8(&i8Disp); 11948 u64EffAddr += i8Disp; 11949 break; 11950 } 11951 case 2: 11952 { 11953 uint32_t u32Disp; 11954 IEM_OPCODE_GET_NEXT_U32(&u32Disp); 11955 u64EffAddr += (int32_t)u32Disp; 11956 break; 11957 } 11958 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */ 11959 } 11960 11961 } 11962 11963 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT) 11964 *pGCPtrEff = u64EffAddr; 11965 else 11966 { 11967 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT); 11968 *pGCPtrEff = u64EffAddr & UINT32_MAX; 11969 } 11970 } 11971 11972 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff)); 11973 return VINF_SUCCESS; 11974 } 11975 11976 11285 11977 #ifdef IEM_WITH_SETJMP 11286 11978 /** … … 11453 12145 { 11454 12146 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr); 11455 u64EffAddr += pCtx->rip + pVCpu->iem.s.offOpcode+ cbImm;12147 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm; 11456 12148 } 11457 12149 else … … 12755 13447 } 12756 13448 //#ifdef DEBUG 12757 // AssertMsg( pVCpu->iem.s.offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pVCpu->iem.s.offOpcode, cbInstr));13449 // AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr)); 12758 13450 //#endif 12759 13451 … … 12916 13608 { 12917 13609 iemInitDecoder(pVCpu, false); 13610 #ifdef IEM_WITH_CODE_TLB 13611 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC; 13612 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes; 13613 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes); 13614 pVCpu->iem.s.offCurInstrStart = 0; 13615 pVCpu->iem.s.offInstrNextByte = 0; 13616 #else 12918 13617 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode)); 12919 13618 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode); 13619 #endif 12920 13620 rcStrict = VINF_SUCCESS; 12921 13621 } … … 12966 13666 { 12967 13667 iemInitDecoder(pVCpu, true); 13668 #ifdef IEM_WITH_CODE_TLB 13669 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC; 13670 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes; 13671 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes); 13672 pVCpu->iem.s.offCurInstrStart = 0; 13673 pVCpu->iem.s.offInstrNextByte = 0; 13674 #else 12968 13675 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode)); 12969 13676 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode); 13677 #endif 12970 13678 rcStrict = VINF_SUCCESS; 12971 13679 } … … 13008 13716 { 13009 13717 iemInitDecoder(pVCpu, true); 13718 #ifdef IEM_WITH_CODE_TLB 13719 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC; 13720 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes; 13721 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes); 13722 pVCpu->iem.s.offCurInstrStart = 0; 13723 pVCpu->iem.s.offInstrNextByte = 0; 13724 #else 13010 13725 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode)); 13011 13726 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode); 13727 #endif 13012 13728 rcStrict = VINF_SUCCESS; 13013 13729 } -
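The heart of the IEMAll.cpp changes above is the new code TLB path: iemOpcodeFetchBytesJmp computes a tag as (GCPtrFirst >> X86_PAGE_SHIFT) | CodeTlb.uTlbRevision and indexes a 256-entry table with the low byte of that tag. The fragment below is a minimal standalone sketch of just that lookup scheme; CodeTlb, CodeTlbEntry, TlbLookup and MY_PAGE_SHIFT are invented names for illustration, and the real IEMTLBENTRY carries considerably more state (physical revision, access flags, mapping pointer).

```c
#include <stdbool.h>
#include <stdint.h>

#define MY_PAGE_SHIFT 12                       /* stand-in for X86_PAGE_SHIFT */

typedef struct CodeTlbEntry
{
    uint64_t uTag;          /* (guest page number) | TLB revision; 0 = unused */
    uint64_t GCPhysPage;    /* translated physical page address (illustrative) */
} CodeTlbEntry;

typedef struct CodeTlb
{
    uint64_t     uRevision;       /* OR'ed into every tag */
    CodeTlbEntry aEntries[256];   /* direct mapped on the low 8 bits of the tag */
} CodeTlb;

/* Returns true on a hit and produces the physical address of the fetch; on a
 * miss the caller would walk the guest page tables (PGMGstGetPage in IEM) and
 * install the translation before retrying. */
static bool TlbLookup(CodeTlb *pTlb, uint64_t GCPtrFirst, uint64_t *pGCPhys)
{
    uint64_t const uTag  = (GCPtrFirst >> MY_PAGE_SHIFT) | pTlb->uRevision;
    CodeTlbEntry  *pTlbe = &pTlb->aEntries[(uint8_t)uTag];
    if (pTlbe->uTag == uTag)
    {
        *pGCPhys = pTlbe->GCPhysPage | (GCPtrFirst & ((UINT64_C(1) << MY_PAGE_SHIFT) - 1));
        return true;
    }
    return false;
}
```

Folding the revision counter into the tag presumably lets a full code-TLB flush bump uRevision instead of clearing all 256 entries. Note that the changeset itself leaves the hit path as a placeholder (`if (pTlbe->uTag == uTag) { }`), so this sketch fills in the apparent intent rather than the final VirtualBox implementation.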
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r62076 r62171 760 760 pCtx->eflags.Bits.u1RF = 0; 761 761 762 #ifndef IEM_WITH_CODE_TLB 762 763 /* Flush the prefetch buffer. */ 763 764 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 765 #endif 764 766 return VINF_SUCCESS; 765 767 } … … 786 788 pCtx->eflags.Bits.u1RF = 0; 787 789 790 #ifndef IEM_WITH_CODE_TLB 788 791 /* Flush the prefetch buffer. */ 789 792 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 793 #endif 790 794 return VINF_SUCCESS; 791 795 } … … 830 834 pCtx->eflags.Bits.u1RF = 0; 831 835 836 #ifndef IEM_WITH_CODE_TLB 832 837 /* Flush the prefetch buffer. */ 833 838 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 839 #endif 834 840 return VINF_SUCCESS; 835 841 } … … 856 862 pCtx->eflags.Bits.u1RF = 0; 857 863 864 #ifndef IEM_WITH_CODE_TLB 858 865 /* Flush the prefetch buffer. */ 859 866 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 867 #endif 860 868 return VINF_SUCCESS; 861 869 } … … 883 891 pCtx->eflags.Bits.u1RF = 0; 884 892 893 #ifndef IEM_WITH_CODE_TLB 885 894 /* Flush the prefetch buffer. */ 886 895 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 896 #endif 887 897 return VINF_SUCCESS; 888 898 } … … 909 919 pCtx->eflags.Bits.u1RF = 0; 910 920 921 #ifndef IEM_WITH_CODE_TLB 911 922 /* Flush the prefetch buffer. */ 912 923 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 924 #endif 913 925 914 926 return VINF_SUCCESS; … … 1645 1657 1646 1658 /* Flush the prefetch buffer. */ 1659 # ifdef IEM_WITH_CODE_TLB 1660 pVCpu->iem.s.pbInstrBuf = NULL; 1661 # else 1647 1662 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 1663 # endif 1648 1664 return VINF_SUCCESS; 1649 1665 #endif … … 1870 1886 1871 1887 /* Flush the prefetch buffer. */ 1888 #ifdef IEM_WITH_CODE_TLB 1889 pVCpu->iem.s.pbInstrBuf = NULL; 1890 #else 1872 1891 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 1892 #endif 1873 1893 1874 1894 return VINF_SUCCESS; … … 2093 2113 2094 2114 /* Flush the prefetch buffer. */ 2115 #ifdef IEM_WITH_CODE_TLB 2116 pVCpu->iem.s.pbInstrBuf = NULL; 2117 #else 2095 2118 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 2096 2119 #endif 2097 2120 return VINF_SUCCESS; 2098 2121 } … … 2497 2520 2498 2521 /* Flush the prefetch buffer. */ 2522 #ifdef IEM_WITH_CODE_TLB 2523 pVCpu->iem.s.pbInstrBuf = NULL; 2524 #else 2499 2525 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 2526 #endif 2500 2527 return VINF_SUCCESS; 2501 2528 } … … 2570 2597 2571 2598 /* Flush the prefetch buffer. */ 2599 #ifndef IEM_WITH_CODE_TLB 2572 2600 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 2601 #endif 2573 2602 2574 2603 return VINF_SUCCESS; … … 2893 2922 2894 2923 /* Flush the prefetch buffer. */ 2924 #ifdef IEM_WITH_CODE_TLB 2925 pVCpu->iem.s.pbInstrBuf = NULL; 2926 #else 2895 2927 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 2928 #endif 2896 2929 2897 2930 return VINF_SUCCESS; … … 2972 3005 2973 3006 /* Flush the prefetch buffer. */ 3007 #ifdef IEM_WITH_CODE_TLB 3008 pVCpu->iem.s.pbInstrBuf = NULL; 3009 #else 2974 3010 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 3011 #endif 2975 3012 2976 3013 return VINF_SUCCESS; … … 3407 3444 3408 3445 /* Flush the prefetch buffer. */ 3446 #ifdef IEM_WITH_CODE_TLB 3447 pVCpu->iem.s.pbInstrBuf = NULL; 3448 #else 3409 3449 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 3450 #endif 3410 3451 3411 3452 return VINF_SUCCESS; … … 3707 3748 3708 3749 /* Flush the prefetch buffer. 
*/ 3750 #ifdef IEM_WITH_CODE_TLB 3751 pVCpu->iem.s.pbInstrBuf = NULL; 3752 #else 3709 3753 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 3754 #endif 3710 3755 3711 3756 return VINF_SUCCESS; … … 3833 3878 3834 3879 /* Flush the prefetch buffer. */ 3880 #ifdef IEM_WITH_CODE_TLB 3881 pVCpu->iem.s.pbInstrBuf = NULL; 3882 #else 3835 3883 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 3884 #endif 3836 3885 3837 3886 return VINF_SUCCESS; … … 3937 3986 3938 3987 /* Flush the prefetch buffer. */ 3988 #ifdef IEM_WITH_CODE_TLB 3989 pVCpu->iem.s.pbInstrBuf = NULL; 3990 #else 3939 3991 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 3992 #endif 3940 3993 3941 3994 return VINF_SUCCESS; -
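A recurring pattern in the IEMAllCImpl.cpp.h hunks above is the "flush the prefetch buffer" step at the end of branch and far-transfer helpers: without the code TLB it stays `cbOpcode = offOpcode`, while with the TLB the far transfers simply set `pbInstrBuf = NULL` and the near branches compile the flush out. The sketch below condenses the two strategies; MYDECODER, FlushPrefetchAfterBranch and MY_WITH_CODE_TLB are illustrative stand-ins, not IEM identifiers.

```c
#include <stddef.h>
#include <stdint.h>

typedef struct MYDECODER
{
#ifdef MY_WITH_CODE_TLB
    const uint8_t *pbInstrBuf;        /* NULL forces the next fetch through the code TLB */
    uint32_t       offInstrNextByte;
    uint16_t       offCurInstrStart;
#else
    uint8_t        abOpcode[15];
    uint8_t        offOpcode;         /* bytes consumed by the decoder so far */
    uint8_t        cbOpcode;          /* bytes prefetched into abOpcode */
#endif
} MYDECODER;

static void FlushPrefetchAfterBranch(MYDECODER *pDecoder)
{
#ifdef MY_WITH_CODE_TLB
    /* Dropping the buffer pointer is enough: the next opcode fetch misses the
       inline fast path and re-resolves the new RIP via the slow fetch routine. */
    pDecoder->pbInstrBuf = NULL;
#else
    /* Legacy scheme: mark everything prefetched as consumed so the next fetch
       falls into iemOpcodeFetchMoreBytes and re-reads from CS:RIP. */
    pDecoder->cbOpcode = pDecoder->offOpcode;
#endif
}
```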
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
r62111 r62171 10854 10854 #ifndef TST_IEM_CHECK_MC 10855 10855 /* Calc effective address with modified ESP. */ 10856 uint8_t const offOpcodeSaved = pVCpu->iem.s.offOpcode; 10856 /** @todo testcase */ 10857 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10857 10858 RTGCPTR GCPtrEff; 10858 10859 VBOXSTRICTRC rcStrict; 10859 rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff); 10860 switch (pVCpu->iem.s.enmEffOpSize) 10861 { 10862 case IEMMODE_16BIT: rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0, &GCPtrEff, 2); break; 10863 case IEMMODE_32BIT: rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0, &GCPtrEff, 4); break; 10864 case IEMMODE_64BIT: rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0, &GCPtrEff, 8); break; 10865 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 10866 } 10860 10867 if (rcStrict != VINF_SUCCESS) 10861 10868 return rcStrict; 10862 pVCpu->iem.s.offOpcode = offOpcodeSaved; 10863 10864 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10865 uint64_t const RspSaved = pCtx->rsp; 10866 switch (pVCpu->iem.s.enmEffOpSize) 10867 { 10868 case IEMMODE_16BIT: iemRegAddToRsp(pVCpu, pCtx, 2); break; 10869 case IEMMODE_32BIT: iemRegAddToRsp(pVCpu, pCtx, 4); break; 10870 case IEMMODE_64BIT: iemRegAddToRsp(pVCpu, pCtx, 8); break; 10871 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 10872 } 10873 rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff); 10874 Assert(rcStrict == VINF_SUCCESS); 10875 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 10876 pCtx->rsp = RspSaved; 10869 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 10877 10870 10878 10871 /* Perform the operation - this should be CImpl. */ … … 13709 13702 FNIEMOP_DEF(iemOp_EscF0) 13710 13703 { 13711 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1;13712 13704 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 13705 pVCpu->iem.s.uFpuOpcode = RT_MAKE_U16(bRm, 0xd8 & 0x7); 13713 13706 13714 13707 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) … … 14495 14488 FNIEMOP_DEF(iemOp_EscF1) 14496 14489 { 14497 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1;14498 14490 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 14491 pVCpu->iem.s.uFpuOpcode = RT_MAKE_U16(bRm, 0xd9 & 0x7); 14492 14499 14493 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 14500 14494 { … … 14844 14838 FNIEMOP_DEF(iemOp_EscF2) 14845 14839 { 14846 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1;14847 14840 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 14841 pVCpu->iem.s.uFpuOpcode = RT_MAKE_U16(bRm, 0xda & 0x7); 14848 14842 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 14849 14843 { … … 15304 15298 FNIEMOP_DEF(iemOp_EscF3) 15305 15299 { 15306 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1;15307 15300 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 15301 pVCpu->iem.s.uFpuOpcode = RT_MAKE_U16(bRm, 0xdb & 0x7); 15308 15302 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 15309 15303 { … … 15588 15582 FNIEMOP_DEF(iemOp_EscF4) 15589 15583 { 15590 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1;15591 15584 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 15585 pVCpu->iem.s.uFpuOpcode = RT_MAKE_U16(bRm, 0xdc & 0x7); 15592 15586 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 15593 15587 { … … 15898 15892 FNIEMOP_DEF(iemOp_EscF5) 15899 15893 { 15900 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1;15901 15894 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 15895 pVCpu->iem.s.uFpuOpcode = RT_MAKE_U16(bRm, 0xdd & 0x7); 15902 15896 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 15903 15897 { … … 16145 16139 FNIEMOP_DEF(iemOp_EscF6) 16146 
16140 { 16147 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1;16148 16141 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 16142 pVCpu->iem.s.uFpuOpcode = RT_MAKE_U16(bRm, 0xde & 0x7); 16149 16143 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 16150 16144 { … … 16607 16601 case IEMMODE_16BIT: 16608 16602 IEM_MC_BEGIN(0,0); 16609 if (-(int8_t) pVCpu->iem.s.offOpcode!= i8Imm)16603 if (-(int8_t)IEM_GET_INSTR_LEN(pVCpu) != i8Imm) 16610 16604 { 16611 16605 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1); … … 16626 16620 case IEMMODE_32BIT: 16627 16621 IEM_MC_BEGIN(0,0); 16628 if (-(int8_t) pVCpu->iem.s.offOpcode!= i8Imm)16622 if (-(int8_t)IEM_GET_INSTR_LEN(pVCpu) != i8Imm) 16629 16623 { 16630 16624 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1); … … 16645 16639 case IEMMODE_64BIT: 16646 16640 IEM_MC_BEGIN(0,0); 16647 if (-(int8_t) pVCpu->iem.s.offOpcode!= i8Imm)16641 if (-(int8_t)IEM_GET_INSTR_LEN(pVCpu) != i8Imm) 16648 16642 { 16649 16643 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1); -
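The escape-opcode hunks above (iemOp_EscF0 through iemOp_EscF6) replace the saved offFpuOpcode offset with a uFpuOpcode word built at decode time via RT_MAKE_U16(bRm, bEscape & 7), which iemFpuUpdateOpcodeAndIpWorker then stores straight into the x87 FOP field. The two formulations produce the same 11-bit value, as the standalone check below demonstrates; MakeU16 mirrors what RT_MAKE_U16 does and the example opcode bytes are arbitrary.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* RT_MAKE_U16(lo, hi) in IPRT assembles a word as lo | (hi << 8). */
static uint16_t MakeU16(uint8_t lo, uint8_t hi)
{
    return (uint16_t)(lo | ((uint16_t)hi << 8));
}

int main(void)
{
    uint8_t const bEscape = 0xd9;  /* one of the 0xd8..0xdf FPU escape bytes */
    uint8_t const bRm     = 0x7d;  /* arbitrary ModR/M byte for the example */

    /* Old scheme: FOP reconstructed later from the saved opcode bytes. */
    uint16_t const fopOld = (uint16_t)(bRm | ((uint16_t)(bEscape & 0x7) << 8));

    /* New scheme: latched during decode, as the changeset now does. */
    uint16_t const fopNew = MakeU16(bRm, bEscape & 0x7);

    assert(fopOld == fopNew);
    printf("FOP = %#x\n", fopNew);
    return 0;
}
```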
trunk/src/VBox/VMM/include/IEMInternal.h
r62076 r62171 319 319 } IEMTLBENTRY; 320 320 AssertCompileSize(IEMTLBENTRY, 32); 321 /** Pointer to an IEM TLB entry. */ 322 typedef IEMTLBENTRY *PIEMTLBENTRY; 321 323 322 324 /** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev) … … 391 393 typedef struct IEMCPU 392 394 { 393 /** Pointer to the CPU context - ring-3 context. */394 R3PTRTYPE(PCPUMCTX) pCtxR3;395 /** Pointer to the CPU context - ring-0 context. */396 R0PTRTYPE(PCPUMCTX) pCtxR0;397 /** Pointer to the CPU context - raw-mode context. */398 RCPTRTYPE(PCPUMCTX) pCtxRC;399 400 395 /** Info status code that needs to be propagated to the IEM caller. 401 396 * This cannot be passed internally, as it would complicate all success … … 403 398 * to get right. Instead, we'll store status codes to pass on here. Each 404 399 * source of these codes will perform appropriate sanity checks. */ 405 int32_t rcPassUp; 400 int32_t rcPassUp; /* 0x00 */ 406 401 407 402 /** The current CPU execution mode (CS). */ 408 IEMMODE enmCpuMode; 403 IEMMODE enmCpuMode; /* 0x04 */ 409 404 /** The CPL. */ 410 uint8_t uCpl; 405 uint8_t uCpl; /* 0x08 */ 411 406 412 407 /** Whether to bypass access handlers or not. */ 413 bool fBypassHandlers; 408 bool fBypassHandlers; /* 0x09 */ 414 409 /** Indicates that we're interpreting patch code - RC only! */ 415 bool fInPatchCode; 410 bool fInPatchCode; /* 0x0a */ 416 411 417 412 /** @name Decoder state. 418 413 * @{ */ 419 /** The current offset into abOpcodes. */ 420 uint8_t offOpcode; 421 /** The size of what has currently been fetched into abOpcodes. */ 422 uint8_t cbOpcode; 423 424 /** The effective segment register (X86_SREG_XXX). */ 425 uint8_t iEffSeg; 426 414 #ifdef IEM_WITH_CODE_TLB 415 /** Unused. */ 416 uint8_t bUnused0; /* 0x0b */ 417 /** The offset of the next instruction byte. */ 418 uint32_t offInstrNextByte; /* 0x0c */ 419 /** Pointer to the page containing RIP, user specified buffer or abOpcode. 420 * This can be NULL if the page isn't mappable for some reason, in which 421 * case we'll do fallback stuff. 422 * 423 * If we're executing an instruction from a user specified buffer, 424 * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page 425 * aligned pointer but pointer to the user data. 426 * 427 * For instructions crossing pages, this will start on the first page and be 428 * advanced to the next page by the time we've decoded the instruction. This 429 * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt> 430 */ 431 uint8_t const *pbInstrBuf; /* 0x10 */ 432 # if defined(IN_RC) && HC_ARCH_BITS != 32 433 uint32_t uInstrBufHigh; /** The high dword of the host context pbInstrBuf member. */ 434 # endif 435 /** The program counter corresponding to pbInstrBuf. 436 * This is set to a non-canonical address when we need to invalidate it. */ 437 uint64_t uInstrBufPc; /* 0x18 */ 438 /** The number of bytes available at pbInstrBuf for the current instruction. 439 * This takes the max opcode length into account so that doesn't need to be 440 * checked separately. */ 441 uint32_t cbInstrBuf; /* 0x20 */ 442 /** The number of bytes available at pbInstrBuf in total (for IEMExecLots). 443 * This takes the CS segment limit into account. */ 444 uint16_t cbInstrBufTotal; /* 0x24 */ 445 /** Offset into pbInstrBuf of the first byte of the current instruction. */ 446 uint16_t offCurInstrStart; /* 0x26 */ 447 448 /** The prefix mask (IEM_OP_PRF_XXX). 
*/ 449 uint32_t fPrefixes; /* 0x28 */ 427 450 /** The extra REX ModR/M register field bit (REX.R << 3). */ 428 uint8_t uRexReg; 451 uint8_t uRexReg; /* 0x2c */ 429 452 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit 430 453 * (REX.B << 3). */ 431 uint8_t uRexB; 454 uint8_t uRexB; /* 0x2d */ 455 /** The extra REX SIB index field bit (REX.X << 3). */ 456 uint8_t uRexIndex; /* 0x2e */ 457 458 /** The effective segment register (X86_SREG_XXX). */ 459 uint8_t iEffSeg; /* 0x2f */ 460 461 #else 462 /** The current offset into abOpcodes. */ 463 uint8_t offOpcode; /* 0x0b */ 464 /** The size of what has currently been fetched into abOpcodes. */ 465 uint8_t cbOpcode; /* 0x0c */ 466 467 /** The effective segment register (X86_SREG_XXX). */ 468 uint8_t iEffSeg; /* 0x0d */ 469 470 /** The extra REX ModR/M register field bit (REX.R << 3). */ 471 uint8_t uRexReg; /* 0x0e */ 472 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit 473 * (REX.B << 3). */ 474 uint8_t uRexB; /* 0x0f */ 432 475 /** The prefix mask (IEM_OP_PRF_XXX). */ 433 uint32_t fPrefixes; 476 uint32_t fPrefixes; /* 0x10 */ 434 477 /** The extra REX SIB index field bit (REX.X << 3). */ 435 uint8_t uRexIndex; 436 437 /** Offset into abOpcodes where the FPU instruction starts. 438 * Only set by the FPU escape opcodes (0xd8-0xdf) and used later on when the 439 * instruction result is committed. */ 440 uint8_t offFpuOpcode; 478 uint8_t uRexIndex; /* 0x14 */ 441 479 442 480 /** Explicit alignment padding. */ 443 uint8_t abAlignment1[2]; 481 uint8_t abAlignment1[3]; /* 0x15 */ 482 #endif 444 483 445 484 /** The effective operand mode . */ 446 IEMMODE enmEffOpSize; 485 IEMMODE enmEffOpSize; /* 0x30, 0x18 */ 447 486 /** The default addressing mode . */ 448 IEMMODE enmDefAddrMode; 487 IEMMODE enmDefAddrMode; /* 0x34, 0x1c */ 449 488 /** The effective addressing mode . */ 450 IEMMODE enmEffAddrMode; 489 IEMMODE enmEffAddrMode; /* 0x38, 0x20 */ 451 490 /** The default operand mode . */ 452 IEMMODE enmDefOpSize; 491 IEMMODE enmDefOpSize; /* 0x3c, 0x24 */ 492 493 /** The FPU opcode (FOP). */ 494 uint16_t uFpuOpcode; /* 0x40, 0x28 */ 495 /** Align the opcode buffer on a dword boundrary. */ 496 uint8_t abAlignment2a[2]; /* 0x42, 0x2a */ 453 497 454 498 /** The opcode bytes. */ 455 uint8_t abOpcode[15]; 499 uint8_t abOpcode[15]; /* 0x44, 0x2c */ 456 500 /** Explicit alignment padding. */ 457 uint8_t abAlignment2[HC_ARCH_BITS == 64 ? 5 : 5]; 501 #ifdef IEM_WITH_CODE_TLB 502 uint8_t abAlignment2b[1+4]; /* 0x53 */ 503 #else 504 uint8_t abAlignment2b[1+28]; /* 0x3b */ 505 #endif 458 506 /** @} */ 459 507 508 460 509 /** The flags of the current exception / interrupt. */ 461 uint32_t fCurXcpt; 510 uint32_t fCurXcpt; /* 0x58, 0x58 */ 462 511 /** The current exception / interrupt. */ 463 512 uint8_t uCurXcpt; … … 524 573 /** Pointer set jump buffer - raw-mode context. */ 525 574 RCPTRTYPE(jmp_buf *) pJmpBufRC; 526 527 575 528 576 /** @name Statistics … … 549 597 /** Number of long jumps. */ 550 598 uint32_t cLongJumps; 551 uint32_t u Padding; /**< Alignment padding. */599 uint32_t uAlignment6; /**< Alignment padding. */ 552 600 #ifdef IEM_VERIFICATION_MODE_FULL 553 601 /** The Number of I/O port reads that has been performed. */ … … 574 622 * emR3ExecuteInstruction and iemExecVerificationModeCheck. */ 575 623 uint8_t cVerifyDepth; 576 bool afAlignment 2[2];624 bool afAlignment7[2]; 577 625 /** Mask of undefined eflags. 578 626 * The verifier will any difference in these flags. 
*/ … … 605 653 /** @} */ 606 654 607 uint32_t au32Alignment 6[HC_ARCH_BITS == 64 ? 1 + 4 + 8 : 1 + 2 + 4]; /**< Alignment padding. */655 uint32_t au32Alignment8[HC_ARCH_BITS == 64 ? 1 + 2 + 8 : 1 + 2]; /**< Alignment padding. */ 608 656 609 657 /** Data TLB. … … 613 661 * @remarks Must be 64-byte aligned. */ 614 662 IEMTLB CodeTlb; 663 664 /** Pointer to the CPU context - ring-3 context. 665 * @todo put inside IEM_VERIFICATION_MODE_FULL++. */ 666 R3PTRTYPE(PCPUMCTX) pCtxR3; 667 /** Pointer to the CPU context - ring-0 context. */ 668 R0PTRTYPE(PCPUMCTX) pCtxR0; 669 /** Pointer to the CPU context - raw-mode context. */ 670 RCPTRTYPE(PCPUMCTX) pCtxRC; 671 /** Alignment padding. */ 672 RTRCPTR uAlignment9; 615 673 616 674 #ifdef IEM_VERIFICATION_MODE_FULL … … 642 700 #if !defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE) \ 643 701 && !defined(IEM_VERIFICATION_MODE_MINIMAL) && defined(VMCPU_INCL_CPUM_GST_CTX) 644 # define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)702 # define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx) 645 703 #else 646 # define IEM_GET_CTX(a_pVCpu) ((a_pVCpu)->iem.s.CTX_SUFF(pCtx))704 # define IEM_GET_CTX(a_pVCpu) ((a_pVCpu)->iem.s.CTX_SUFF(pCtx)) 647 705 #endif 648 706 … … 652 710 */ 653 711 #if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC 654 # define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)712 # define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU) 655 713 #else 656 # define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu) 657 #endif 714 # define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu) 715 #endif 716 717 /** @def Gets the instruction length. */ 718 #ifdef IEM_WITH_CODE_TLB 719 # define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(a_pVCpu)->iem.s.offCurInstrStart) 720 #else 721 # define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode) 722 #endif 723 658 724 659 725 /** @name IEM_ACCESS_XXX - Access details.
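Because the decoder state now differs between the two builds, IEMInternal.h gains the IEM_GET_INSTR_LEN macro shown above: with the code TLB the instruction length is offInstrNextByte minus offCurInstrStart, without it it is simply offOpcode. A minimal mirror of that idea, with invented names (DECODERSTATE, InstrLen, MY_WITH_CODE_TLB), looks like this:

```c
#include <stdint.h>

typedef struct DECODERSTATE
{
#ifdef MY_WITH_CODE_TLB
    uint32_t offInstrNextByte;   /* next byte to fetch, relative to pbInstrBuf */
    uint16_t offCurInstrStart;   /* where the current instruction began in the buffer */
#else
    uint8_t  offOpcode;          /* bytes of the current instruction consumed so far */
#endif
} DECODERSTATE;

static uint32_t InstrLen(const DECODERSTATE *pState)
{
#ifdef MY_WITH_CODE_TLB
    return pState->offInstrNextByte - (uint32_t)pState->offCurInstrStart;
#else
    return pState->offOpcode;
#endif
}
```

That single definition is why so many call sites in the main diff, including the IEM_MC_CALL_CIMPL_* and IEM_MC_DEFER_TO_CIMPL_* macros, the RIP-relative effective-address code and the LOOP handlers, switch from pVCpu->iem.s.offOpcode to IEM_GET_INSTR_LEN(pVCpu): the length has to come from an expression that is valid in both builds.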