Changeset 62302 in vbox for trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
Timestamp: Jul 18, 2016 1:58:10 PM
File: trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (1 edited)
Legend: unchanged lines are unprefixed, added lines are prefixed with "+", removed lines with "-".
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
--- trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r62291)
+++ trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r62302)

@@ -82 +82 @@
 //#define IEM_LOG_MEMORY_WRITES
 #define IEM_IMPLEMENTS_TASKSWITCH
-//#define IEM_WITH_CODE_TLB - work in progress


@@ -873 +872 @@
     pVCpu->iem.s.cbInstrBuf        = UINT16_MAX;
     pVCpu->iem.s.cbInstrBufTotal   = UINT16_MAX;
-    pVCpu->iem.s.offCurInstrStart  = UINT16_MAX;
+    pVCpu->iem.s.offCurInstrStart  = INT16_MAX;
     pVCpu->iem.s.uInstrBufPc       = UINT64_C(0xc0ffc0ffcff0c0ff);
 # else

@@ -1087 +1086 @@
             pVCpu->iem.s.offInstrNextByte = 0;
             pVCpu->iem.s.offCurInstrStart = 0;
+            pVCpu->iem.s.cbInstrBuf       = 0;
+            pVCpu->iem.s.cbInstrBufTotal  = 0;
         }
     }

@@ -1093 +1094 @@
         pVCpu->iem.s.offInstrNextByte = 0;
         pVCpu->iem.s.offCurInstrStart = 0;
+        pVCpu->iem.s.cbInstrBuf       = 0;
+        pVCpu->iem.s.cbInstrBufTotal  = 0;
     }
 #else

@@ -1312 +1315 @@
  * @param   fVmm    Set when PGM calls us with a remapping.
  */
-void IEMInvalidTLBs(PVMCPU pVCpu, bool fVmm)
+VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
 {
 #ifdef IEM_WITH_CODE_TLB
+    pVCpu->iem.s.cbInstrBufTotal = 0;
     pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
     if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)

@@ -1344 +1348 @@

 /**
- * Invalidates the host physical aspects of the IEM TLBs.
- *
- * This is called internally as well as by PGM when moving GC mappings.
+ * Invalidates a page in the TLBs.
  *
  * @param   pVCpu       The cross context virtual CPU structure of the calling
  *                      thread.
- * @param   uTlbPhysRev The revision of the phys stuff.
- * @param   fFullFlush  Whether we're doing a full flush or not.
- */
-void IEMInvalidTLBsHostPhys(PVMCPU pVCpu, uint64_t uTlbPhysRev, bool fFullFlush)
+ * @param   GCPtr       The address of the page to invalidate
+ */
+VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
+{
+#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
+    GCPtr = GCPtr >> X86_PAGE_SHIFT;
+    AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
+    AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
+    uintptr_t idx = (uint8_t)GCPtr;
+
+# ifdef IEM_WITH_CODE_TLB
+    if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
+    {
+        pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
+        if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
+            pVCpu->iem.s.cbInstrBufTotal = 0;
+    }
+# endif
+
+# ifdef IEM_WITH_DATA_TLB
+    if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
+        pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
+# endif
+#else
+    NOREF(pVCpu); NOREF(GCPtr);
+#endif
+}
+
+
+/**
+ * Invalidates the host physical aspects of the IEM TLBs.
+ *
+ * This is called internally as well as by PGM when moving GC mappings.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling
+ *                      thread.
+ */
+VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
 {
 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
     /* Note! This probably won't end up looking exactly like this, but it give an idea... */

-    pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
-    pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
-
-    if (!fFullFlush)
-    { /* very likely */ }
+# ifdef IEM_WITH_CODE_TLB
+    pVCpu->iem.s.cbInstrBufTotal = 0;
+# endif
+    uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
+    if (uTlbPhysRev != 0)
+    {
+        pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
+        pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
+    }
     else
     {
+        pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
+        pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
+
         unsigned i;
 # ifdef IEM_WITH_CODE_TLB

@@ -1370 +1413 @@
         while (i-- > 0)
         {
-            pVCpu->iem.s.CodeTlb.aEntries[i].pMappingR3       = NULL;
+            pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3      = NULL;
             pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
         }

@@ -1378 +1421 @@
         while (i-- > 0)
         {
-            pVCpu->iem.s.DataTlb.aEntries[i].pMappingR3       = NULL;
+            pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3      = NULL;
             pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
         }
 # endif
     }
-#endif
-    NOREF(pVCpu); NOREF(fFullFlush);
-}
-
+#else
+    NOREF(pVCpu);
+#endif
+}
+
+
+/**
+ * Invalidates the host physical aspects of the IEM TLBs.
+ *
+ * This is called internally as well as by PGM when moving GC mappings.
+ *
+ * @param   pVM         The cross context VM structure.
+ *
+ * @remarks Caller holds the PGM lock.
+ */
+VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
+{
+
+}

 #ifdef IEM_WITH_CODE_TLB

@@ -1409 +1467 @@
 IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
 {
-    Assert(cbDst <= 8);
-    uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
-
-    /*
-     * We might have a partial buffer match, deal with that first to make the
-     * rest simpler.  This is the first part of the cross page/buffer case.
-     */
-    if (pVCpu->iem.s.pbInstrBuf != NULL)
-    {
-        if (offBuf < pVCpu->iem.s.cbInstrBuf)
-        {
-            Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
-            uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
-            memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
-
-            cbDst  -= cbCopy;
-            pvDst   = (uint8_t *)pvDst + cbCopy;
-            offBuf += cbCopy;
-            pVCpu->iem.s.offInstrNextByte += offBuf;
-        }
-    }
-
-    /*
-     * Check segment limit, figuring how much we're allowed to access at this point.
-     */
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    RTGCPTR  GCPtrFirst;
-    uint32_t cbMaxRead;
-    if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
-    {
-        GCPtrFirst = pCtx->rip + (offBuf - pVCpu->iem.s.offCurInstrStart);
-        if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
-        { /* likely */ }
-        else
-            iemRaiseGeneralProtectionFault0Jmp(pVCpu);
-        cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
-    }
-    else
-    {
-        GCPtrFirst = pCtx->eip + (offBuf - pVCpu->iem.s.offCurInstrStart);
-        Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
-        if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
-        { /* likely */ }
-        else
-            iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
-        cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
-        if (cbMaxRead != 0)
-        { /* likely */ }
-        else
-        {
-            /* Overflowed because address is 0 and limit is max. */
-            Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
-            cbMaxRead = X86_PAGE_SIZE;
-        }
-        GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
-        uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
-        if (cbMaxRead2 < cbMaxRead)
-            cbMaxRead = cbMaxRead2;
-        /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
-    }
-
-    /*
-     * Get the TLB entry for this piece of code.
-     */
-    uint64_t     uTag  = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
-    AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
-    PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
-    if (pTlbe->uTag == uTag)
-    {
-        /* likely when executing lots of code, otherwise unlikely */
-# ifdef VBOX_WITH_STATISTICS
-        pVCpu->iem.s.CodeTlb.cTlbHits++;
-# endif
-    }
-    else
-    {
-        pVCpu->iem.s.CodeTlb.cTlbMisses++;
-        pVCpu->iem.s.CodeTlb.cTlbMissesTag++;
-# ifdef VBOX_WITH_RAW_MODE_NOT_R0
-        if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
-        {
-            pTlbe->uTag = uTag;
-            pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
-                                    | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
-            pTlbe->GCPhys = NIL_RTGCPHYS;
-            pTlbe->pMappingR3 = NULL;
-        }
-        else
-# endif
-        {
-            RTGCPHYS GCPhys;
-            uint64_t fFlags;
-            int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
-            if (RT_FAILURE(rc))
-            {
-                Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
-                iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
-            }
-
-            AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
-            pTlbe->uTag = uTag;
-            pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
-            pTlbe->GCPhys = GCPhys;
-            pTlbe->pMappingR3 = NULL;
-        }
-    }
-
-    /*
-     * Check TLB page table level access flags.
-     */
-    if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
-    {
-        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
-        {
-            Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
-            iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
-        }
-        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
-        {
-            Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
-            iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
-        }
-    }
-
-# ifdef VBOX_WITH_RAW_MODE_NOT_R0
-    /*
-     * Allow interpretation of patch manager code blocks since they can for
-     * instance throw #PFs for perfectly good reasons.
-     */
-    if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
-    { /* no unlikely */ }
-    else
-    {
-        /** @todo Could be optimized this a little in ring-3 if we liked. */
-        size_t cbRead = 0;
-        int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
-        AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
-        AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
-        return;
-    }
-# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
-
-    /*
-     * Look up the physical page info if necessary.
-     */
-    if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
-    { /* not necessary */ }
-    else
-    {
-    }
-
-
-# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
-    /*
-     * Try do a direct read using the pMappingR3 pointer.
-     */
-    if (!(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
-    {
-
-    }
-# endif
-
-
-# if 0
-    /*
-     * Read the bytes at this address.
-     *
-     * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
-     * and since PATM should only patch the start of an instruction there
-     * should be no need to check again here.
-     */
-    if (!pVCpu->iem.s.fBypassHandlers)
-    {
-        VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
-                                            cbToTryRead, PGMACCESSORIGIN_IEM);
-        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-        { /* likely */ }
-        else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
-        {
-            Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
-                 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
-            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
-        }
-        else
-        {
-            Log((RT_SUCCESS(rcStrict)
-                 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
-                 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
-                 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
-            return rcStrict;
-        }
-    }
-    else
-    {
-        rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
-        if (RT_SUCCESS(rc))
-        { /* likely */ }
-        else
-        {
-            Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
-            return rc;
-        }
-    }
-    pVCpu->iem.s.cbOpcode += cbToTryRead;
-    Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
-# endif
+#ifdef IN_RING3
+//__debugbreak();
+#else
+    longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
+#endif
+    for (;;)
+    {
+        Assert(cbDst <= 8);
+        uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
+
+        /*
+         * We might have a partial buffer match, deal with that first to make the
+         * rest simpler.  This is the first part of the cross page/buffer case.
+         */
+        if (pVCpu->iem.s.pbInstrBuf != NULL)
+        {
+            if (offBuf < pVCpu->iem.s.cbInstrBuf)
+            {
+                Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
+                uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
+                memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
+
+                cbDst  -= cbCopy;
+                pvDst   = (uint8_t *)pvDst + cbCopy;
+                offBuf += cbCopy;
+                pVCpu->iem.s.offInstrNextByte += offBuf;
+            }
+        }
+
+        /*
+         * Check segment limit, figuring how much we're allowed to access at this point.
+         *
+         * We will fault immediately if RIP is past the segment limit / in non-canonical
+         * territory.  If we do continue, there are one or more bytes to read before we
+         * end up in trouble and we need to do that first before faulting.
+         */
+        PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+        RTGCPTR  GCPtrFirst;
+        uint32_t cbMaxRead;
+        if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+        {
+            GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
+            if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
+            { /* likely */ }
+            else
+                iemRaiseGeneralProtectionFault0Jmp(pVCpu);
+            cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
+        }
+        else
+        {
+            GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
+            Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
+            if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
+            { /* likely */ }
+            else
+                iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
+            cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
+            if (cbMaxRead != 0)
+            { /* likely */ }
+            else
+            {
+                /* Overflowed because address is 0 and limit is max. */
+                Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
+                cbMaxRead = X86_PAGE_SIZE;
+            }
+            GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
+            uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
+            if (cbMaxRead2 < cbMaxRead)
+                cbMaxRead = cbMaxRead2;
+            /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
+        }
+
+        /*
+         * Get the TLB entry for this piece of code.
+         */
+        uint64_t     uTag  = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
+        AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
+        PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
+        if (pTlbe->uTag == uTag)
+        {
+            /* likely when executing lots of code, otherwise unlikely */
+# ifdef VBOX_WITH_STATISTICS
+            pVCpu->iem.s.CodeTlb.cTlbHits++;
+# endif
+        }
+        else
+        {
+            pVCpu->iem.s.CodeTlb.cTlbMisses++;
+# ifdef VBOX_WITH_RAW_MODE_NOT_R0
+            if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
+            {
+                pTlbe->uTag = uTag;
+                pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
+                                        | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
+                pTlbe->GCPhys           = NIL_RTGCPHYS;
+                pTlbe->pbMappingR3      = NULL;
+            }
+            else
+# endif
+            {
+                RTGCPHYS GCPhys;
+                uint64_t fFlags;
+                int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
+                if (RT_FAILURE(rc))
+                {
+                    Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
+                    iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
+                }
+
+                AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
+                pTlbe->uTag             = uTag;
+                pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
+                pTlbe->GCPhys           = GCPhys;
+                pTlbe->pbMappingR3      = NULL;
+            }
+        }
+
+        /*
+         * Check TLB page table level access flags.
+         */
+        if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
+        {
+            if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
+            {
+                Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
+                iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
+            }
+            if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
+            {
+                Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
+                iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
+            }
+        }
+
+# ifdef VBOX_WITH_RAW_MODE_NOT_R0
+        /*
+         * Allow interpretation of patch manager code blocks since they can for
+         * instance throw #PFs for perfectly good reasons.
+         */
+        if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
+        { /* no unlikely */ }
+        else
+        {
+            /** @todo Could be optimized this a little in ring-3 if we liked. */
+            size_t cbRead = 0;
+            int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
+            AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
+            AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
+            return;
+        }
+# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
+
+        /*
+         * Look up the physical page info if necessary.
+         */
+        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
+        { /* not necessary */ }
+        else
+        {
+            AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE     == IEMTLBE_F_PG_NO_WRITE);
+            AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ      == IEMTLBE_F_PG_NO_READ);
+            AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
+            pTlbe->fFlagsAndPhysRev &= ~(  IEMTLBE_F_PHYS_REV
+                                         | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
+            int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
+                                                &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
+            AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
+        }
+
+# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
+        /*
+         * Try do a direct read using the pbMappingR3 pointer.
+         */
+        if (    (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
+             == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
+        {
+            uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
+            pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
+            if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
+            {
+                pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead);
+                pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
+            }
+            else
+            {
+                uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
+                Assert(cbInstr < cbMaxRead);
+                pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
+                pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
+            }
+            if (cbDst <= cbMaxRead)
+            {
+                pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
+                pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
+                pVCpu->iem.s.pbInstrBuf       = pTlbe->pbMappingR3;
+                memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
+                return;
+            }
+            pVCpu->iem.s.pbInstrBuf = NULL;
+
+            memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
+            pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
+        }
+        else
+# endif
+#if 0
+        /*
+         * If there is no special read handling, so we can read a bit more and
+         * put it in the prefetch buffer.
+         */
+        if (   cbDst < cbMaxRead
+            && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
+        {
+            VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
+                                                &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
+            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+            { /* likely */ }
+            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
+            {
+                Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
+                     GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
+                rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
+                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICRC_VAL(rcStrict)));
+            }
+            else
+            {
+                Log((RT_SUCCESS(rcStrict)
+                     ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
+                     : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
+                     GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
+                longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+            }
+        }
+        /*
+         * Special read handling, so only read exactly what's needed.
+         * This is a highly unlikely scenario.
+         */
+        else
+#endif
+        {
+            pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
+            uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
+            VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
+                                                pvDst, cbToRead, PGMACCESSORIGIN_IEM);
+            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+            { /* likely */ }
+            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
+            {
+                Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
+                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
+                rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
+                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
+            }
+            else
+            {
+                Log((RT_SUCCESS(rcStrict)
+                     ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
+                     : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
+                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
+                longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+            }
+            pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
+            if (cbToRead == cbDst)
+                return;
+        }
+
+        /*
+         * More to read, loop.
+         */
+        cbDst -= cbMaxRead;
+        pvDst  = (uint8_t *)pvDst + cbMaxRead;
+    }
 }
