Changeset r62302

Timestamp: Jul 18, 2016 1:58:10 PM
Location:  trunk
Files:     8 edited
Legend: lines prefixed with "+" were added, lines prefixed with "-" were removed; unprefixed lines are unmodified context, and "…" marks elided unchanged code.
trunk/include/VBox/vmm/iem.h
    (r61968 → r62302)

      VMM_INT_DECL(int)   IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp);

    + VMM_INT_DECL(void)  IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm);
    + VMM_INT_DECL(void)  IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr);
    + VMM_INT_DECL(void)  IEMTlbInvalidateAllPhysical(PVMCPU pVCpu);
    +
      /** @name Given Instruction Interpreters
       * @{ */
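These three entry points are what PGM uses to keep the IEM TLBs coherent with guest paging changes. As a rough orientation only (the wrapper names below are hypothetical; the real call sites are in the PGMAll.cpp and PGMPhys.cpp hunks further down):

    /* Sketch: where PGM is expected to invoke the new IEM TLB hooks.
       The pgmExample* wrappers are illustrative, not VirtualBox functions. */
    static void pgmExampleInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage)
    {
        IEMTlbInvalidatePage(pVCpu, GCPtrPage);        /* one guest page (INVLPG / PTE change)   */
    }
    static void pgmExampleFlushTlb(PVMCPU pVCpu)
    {
        IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);    /* full virtual flush (e.g. CR3 reload)   */
    }
    static void pgmExampleA20Change(PVMCPU pVCpu)
    {
        IEMTlbInvalidateAllPhysical(pVCpu);            /* physical mappings / A20 state changed  */
    }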
trunk/include/VBox/vmm/pgm.h
    (r60847 → r62302)

      VMMDECL(int)        PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap);
      VMMDECL(int)        PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void const *pvSrc, size_t cb, bool fRaiseTrap);

      VMM_INT_DECL(int)   PGMPhysIemGCPhys2Ptr(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers, void **ppv, PPGMPAGEMAPLOCK pLock);
      VMM_INT_DECL(int)   PGMPhysIemQueryAccess(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers);
    + VMM_INT_DECL(int)   PGMPhysIemGCPhys2PtrNoLock(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
    + #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +                                                R3PTRTYPE(uint8_t *) *ppb,
    + #else
    +                                                R3R0PTRTYPE(uint8_t *) *ppb,
    + #endif
    +                                                uint64_t *pfTlb);
    + /** @name Flags returned by PGMPhysIemGCPhys2PtrNoLock
    +  * @{ */
    + #define PGMIEMGCPHYS2PTR_F_NO_WRITE      RT_BIT_32(3)   /**< Not writable (IEMTLBE_F_PG_NO_WRITE). */
    + #define PGMIEMGCPHYS2PTR_F_NO_READ       RT_BIT_32(4)   /**< Not readable (IEMTLBE_F_PG_NO_READ). */
    + #define PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3  RT_BIT_32(7)   /**< No ring-3 mapping (IEMTLBE_F_NO_MAPPINGR3). */
    + /** @} */

      #ifdef VBOX_STRICT
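PGMPhysIemGCPhys2PtrNoLock returns a mapping pointer plus a flag word instead of taking a page-map lock; the caller caches both and decides from the flag bits whether the page may be read or written directly. A minimal consumer sketch, assuming a page-aligned GCPhysPage and illustrative local names (the real caller is the code-TLB path in IEMAll.cpp below):

    /* Sketch: querying the translation once and deciding whether a direct read is allowed. */
    uint8_t *pbPage = NULL;
    uint64_t fTlb   = 0;   /* receives PGMIEMGCPHYS2PTR_F_XXX bits OR'ed with the physical TLB revision */
    int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhysPage, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
                                        &pbPage, &fTlb);
    if (   RT_SUCCESS(rc)
        && !(fTlb & (PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3)))
    {
        /* pbPage can be dereferenced directly; the cached fTlb stays usable only
           while its revision bits still equal the current uTlbPhysRev. */
    }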
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
    (r62291 → r62302)

      //#define IEM_LOG_MEMORY_WRITES
      #define IEM_IMPLEMENTS_TASKSWITCH
    - //#define IEM_WITH_CODE_TLB - work in progress
    …
      pVCpu->iem.s.cbInstrBuf       = UINT16_MAX;
      pVCpu->iem.s.cbInstrBufTotal  = UINT16_MAX;
    - pVCpu->iem.s.offCurInstrStart = UINT16_MAX;
    + pVCpu->iem.s.offCurInstrStart = INT16_MAX;
      pVCpu->iem.s.uInstrBufPc      = UINT64_C(0xc0ffc0ffcff0c0ff);
      # else
    …
      pVCpu->iem.s.offInstrNextByte = 0;
      pVCpu->iem.s.offCurInstrStart = 0;
    + pVCpu->iem.s.cbInstrBuf       = 0;
    + pVCpu->iem.s.cbInstrBufTotal  = 0;
      }
    (the same two assignments are added in the sibling branch of the buffer-reset code as well)
    …
       * @param fVmm      Set when PGM calls us with a remapping.
       */
    - void               IEMInvalidTLBs(PVMCPU pVCpu, bool fVmm)
    + VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
      {
      #ifdef IEM_WITH_CODE_TLB
    +     pVCpu->iem.s.cbInstrBufTotal = 0;
          pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
          if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
    …

    The old IEMInvalidTLBsHostPhys(PVMCPU pVCpu, uint64_t uTlbPhysRev, bool fFullFlush) is replaced by a new
    IEMTlbInvalidatePage() and a reworked IEMTlbInvalidateAllPhysical() that advances the physical revision
    itself; the ring-3 mapping member is renamed pMappingR3 → pbMappingR3 in the process. New code:

    /**
     * Invalidates a page in the TLBs.
     *
     * @param   pVCpu       The cross context virtual CPU structure of the calling
     *                      thread.
     * @param   GCPtr       The address of the page to invalidate
     */
    VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
    {
    #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
        GCPtr = GCPtr >> X86_PAGE_SHIFT;
        AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
        AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
        uintptr_t idx = (uint8_t)GCPtr;

    # ifdef IEM_WITH_CODE_TLB
        if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
        {
            pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
            if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
                pVCpu->iem.s.cbInstrBufTotal = 0;
        }
    # endif

    # ifdef IEM_WITH_DATA_TLB
        if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
            pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
    # endif
    #else
        NOREF(pVCpu); NOREF(GCPtr);
    #endif
    }


    /**
     * Invalidates the host physical aspects of the IEM TLBs.
     *
     * This is called internally as well as by PGM when moving GC mappings.
     *
     * @param   pVCpu       The cross context virtual CPU structure of the calling
     *                      thread.
     */
    VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
    {
    #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
        /* Note! This probably won't end up looking exactly like this, but it give an idea... */

    # ifdef IEM_WITH_CODE_TLB
        pVCpu->iem.s.cbInstrBufTotal = 0;
    # endif
        uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
        if (uTlbPhysRev != 0)
        {
            pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
            pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
        }
        else
        {
            pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
            pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;

            unsigned i;
    # ifdef IEM_WITH_CODE_TLB
            i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
            while (i-- > 0)
            {
                pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3       = NULL;
                pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
            }
    # endif
    # ifdef IEM_WITH_DATA_TLB
            i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
            while (i-- > 0)
            {
                pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3       = NULL;
                pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
            }
    # endif
        }
    #else
        NOREF(pVCpu);
    #endif
    }


    /**
     * Invalidates the host physical aspects of the IEM TLBs.
     *
     * This is called internally as well as by PGM when moving GC mappings.
     *
     * @param   pVM         The cross context VM structure.
     *
     * @remarks Caller holds the PGM lock.
     */
    VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
    {

    }

    …

    Finally, the code-TLB opcode fetcher iemOpcodeFetchBytesJmp() is rewritten. The old, partially stubbed
    body (the same checks without a loop, with empty placeholders for the physical-page lookup and direct-read
    steps, and a trailing disabled read-ahead block) is removed; the new body wraps everything in a for (;;)
    loop so fetches crossing a page or buffer boundary simply iterate, and obtains the physical page info via
    the new PGMPhysIemGCPhys2PtrNoLock() API. New body, with unchanged stretches elided ("…"):

    IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
    {
    #ifdef IN_RING3
    //__debugbreak();
    #else
        longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
    #endif
        for (;;)
        {
            Assert(cbDst <= 8);
            uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;

            /*
             * We might have a partial buffer match, deal with that first to make the
             * rest simpler.  This is the first part of the cross page/buffer case.
             */
            if (pVCpu->iem.s.pbInstrBuf != NULL)
            {
                if (offBuf < pVCpu->iem.s.cbInstrBuf)
                {
                    Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
                    uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
                    memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);

                    cbDst  -= cbCopy;
                    pvDst   = (uint8_t *)pvDst + cbCopy;
                    offBuf += cbCopy;
                    pVCpu->iem.s.offInstrNextByte += offBuf;
                }
            }

            /*
             * Check segment limit, figuring how much we're allowed to access at this point.
             *
             * We will fault immediately if RIP is past the segment limit / in non-canonical
             * territory.  If we do continue, there are one or more bytes to read before we
             * end up in trouble and we need to do that first before faulting.
             */
            PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
            RTGCPTR  GCPtrFirst;
            uint32_t cbMaxRead;
            … (the 64-bit canonical check and the 16/32-bit CS-limit checks are carried over from the old
               implementation, now computing the offset as (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart) …

            /*
             * Get the TLB entry for this piece of code.
             */
            uint64_t     uTag  = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
            AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
            PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
            if (pTlbe->uTag == uTag)
            {
                /* likely when executing lots of code, otherwise unlikely */
    # ifdef VBOX_WITH_STATISTICS
                pVCpu->iem.s.CodeTlb.cTlbHits++;
    # endif
            }
            else
            {
                pVCpu->iem.s.CodeTlb.cTlbMisses++;
    # ifdef VBOX_WITH_RAW_MODE_NOT_R0
                if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
                {
                    pTlbe->uTag             = uTag;
                    pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
                                            | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
                    pTlbe->GCPhys           = NIL_RTGCPHYS;
                    pTlbe->pbMappingR3      = NULL;
                }
                else
    # endif
                {
                    RTGCPHYS GCPhys;
                    uint64_t fFlags;
                    int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
                    if (RT_FAILURE(rc))
                    {
                        Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
                        iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
                    }

                    AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
                    pTlbe->uTag             = uTag;
                    pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
                    pTlbe->GCPhys           = GCPhys;
                    pTlbe->pbMappingR3      = NULL;
                }
            }

            … (the page-table permission checks (user/NX page faults) and the PATM patch-code read path are
               carried over from the old implementation unchanged) …

            /*
             * Look up the physical page info if necessary.
             */
            if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
            { /* not necessary */ }
            else
            {
                AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE     == IEMTLBE_F_PG_NO_WRITE);
                AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ      == IEMTLBE_F_PG_NO_READ);
                AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
                pTlbe->fFlagsAndPhysRev &= ~(  IEMTLBE_F_PHYS_REV
                                             | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
                int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys,
                                                    &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
                                                    &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
                AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
            }

    # if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
            /*
             * Try do a direct read using the pbMappingR3 pointer.
             */
            if (   (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
                == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
            {
                uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
                pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
                if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
                {
                    pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead);
                    pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
                }
                else
                {
                    uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
                    Assert(cbInstr < cbMaxRead);
                    pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
                    pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
                }
                if (cbDst <= cbMaxRead)
                {
                    pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
                    pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
                    pVCpu->iem.s.pbInstrBuf       = pTlbe->pbMappingR3;
                    memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
                    return;
                }
                pVCpu->iem.s.pbInstrBuf = NULL;

                memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
                pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
            }
            else
    # endif
    #if 0
            … (a disabled prefetch path that would read ahead into abOpcode via PGMPhysRead when there is
               no special read handling) …
    #endif
            {
                pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
                uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
                VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
                                                    pvDst, cbToRead, PGMACCESSORIGIN_IEM);
                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                { /* likely */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                {
                    Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
                         GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
                }
                else
                {
                    Log((RT_SUCCESS(rcStrict)
                         ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
                         : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
                         GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
                    longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
                }
                pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
                if (cbToRead == cbDst)
                    return;
            }

            /*
             * More to read, loop.
             */
            cbDst -= cbMaxRead;
            pvDst  = (uint8_t *)pvDst + cbMaxRead;
        }
    }
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
    (r60867 → r62302)

      #include <VBox/vmm/cpum.h>
      #include <VBox/vmm/selm.h>
    + #include <VBox/vmm/iem.h>
      #include <VBox/vmm/iom.h>
      #include <VBox/sup.h>
    …
      REMNotifyInvalidatePage(pVM, GCPtrPage);
      #endif /* !IN_RING3 */
    + IEMTlbInvalidatePage(pVCpu, GCPtrPage);
    …
      }

    + IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
      STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
      return rc;
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
    (r60948 → r62302) — one new function is added:

    /**
     * Converts a GC physical address to a HC ring-3 pointer, with some
     * additional checks.
     *
     * @returns VBox status code (no informational statuses).
     *
     * @param   pVM             The cross context VM structure.
     * @param   pVCpu           The cross context virtual CPU structure of the
     *                          calling EMT.
     * @param   GCPhys          The GC physical address to convert.  This API mask
     *                          the A20 line when necessary.
     * @param   puTlbPhysRev    Where to read the physical TLB revision.  Needs to
     *                          be done while holding the PGM lock.
     * @param   ppb             Where to store the pointer corresponding to GCPhys
     *                          on success.
     * @param   pfTlb           The TLB flags and revision.  We only add stuff.
     *
     * @remarks This is more or a less a copy of PGMR3PhysTlbGCPhys2Ptr and
     *          PGMPhysIemGCPhys2Ptr.
     *
     * @thread  EMT(pVCpu).
     */
    VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
    #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
                                                 R3PTRTYPE(uint8_t *) *ppb,
    #else
                                                 R3R0PTRTYPE(uint8_t *) *ppb,
    #endif
                                                 uint64_t *pfTlb)
    {
        PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
        Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));

        pgmLock(pVM);

        PPGMRAMRANGE pRam;
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
        if (RT_SUCCESS(rc))
        {
            if (!PGM_PAGE_IS_BALLOONED(pPage))
            {
                if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
                {
                    if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
                    {
                        /*
                         * No access handler.
                         */
                        switch (PGM_PAGE_GET_STATE(pPage))
                        {
                            case PGM_PAGE_STATE_ALLOCATED:
                                *pfTlb |= *puTlbPhysRev;
                                break;
                            case PGM_PAGE_STATE_BALLOONED:
                                AssertFailed();
                            case PGM_PAGE_STATE_ZERO:
                            case PGM_PAGE_STATE_SHARED:
                            case PGM_PAGE_STATE_WRITE_MONITORED:
                                *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
                                break;
                        }
    #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
                        *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
                        *ppb = NULL;
    #else
                        PPGMPAGER3MAPTLBE pTlbe;
                        rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
                        AssertLogRelRCReturn(rc, rc);
                        *ppb = (uint8_t *)pTlbe->pv;
    #endif
                    }
                    else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
                    {
                        /*
                         * MMIO or similar all access handler: Catch all access.
                         */
                        *pfTlb |= *puTlbPhysRev
                                | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
                        *ppb = NULL;
                    }
                    else
                    {
                        /*
                         * Write access handler: Catch write accesses if active.
                         */
                        if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
                            *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
                        else
                            switch (PGM_PAGE_GET_STATE(pPage))
                            {
                                case PGM_PAGE_STATE_ALLOCATED:
                                    *pfTlb |= *puTlbPhysRev;
                                    break;
                                case PGM_PAGE_STATE_BALLOONED:
                                    AssertFailed();
                                case PGM_PAGE_STATE_ZERO:
                                case PGM_PAGE_STATE_SHARED:
                                case PGM_PAGE_STATE_WRITE_MONITORED:
                                    *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
                                    break;
                            }
    #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
                        *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
                        *ppb = NULL;
    #else
                        PPGMPAGER3MAPTLBE pTlbe;
                        rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
                        AssertLogRelRCReturn(rc, rc);
                        *ppb = (uint8_t *)pTlbe->pv;
    #endif
                    }
                }
                else
                {
                    /* Alias MMIO: For now, we catch all access. */
                    *pfTlb |= *puTlbPhysRev
                            | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
                    *ppb = NULL;
                }
            }
            else
            {
                /* Ballooned: Shouldn't get here, but we read zero page via PGMPhysRead and writes goes to /dev/null. */
                *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
                *ppb = NULL;
            }
            Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#x pPage=%R[pgmpage]\n", GCPhys, rc, *ppb, *pfTlb, pPage));
        }
        else
        {
            *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
            *ppb = NULL;
            Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#x\n", GCPhys, rc, pPage, *ppb, *pfTlb));
        }

        pgmUnlock(pVM);
        return VINF_SUCCESS;
    }
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
    (r62016 → r62302)

      VMMR3DECL(int) IEMR3Init(PVM pVM)
      {
    +     uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
    +     uint64_t const uInitialTlbPhysRev  = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
    +
          for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
          {
    …
              pVCpu->iem.s.pCtxRC = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);

    +         pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
    +         pVCpu->iem.s.CodeTlb.uTlbPhysRev  = pVCpu->iem.s.DataTlb.uTlbPhysRev  = uInitialTlbPhysRev;
    +
              (the existing STAMR3RegisterF() registrations from cInstructions through cPendingCommit are
               unchanged apart from re-indentation)

    + #ifdef VBOX_WITH_STATISTICS
    +         STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbHits,         STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    +                         "Code TLB hits",              "/IEM/CPU%u/CodeTlb-Hits", idCpu);
    +         STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbHits,         STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    +                         "Data TLB hits",              "/IEM/CPU%u/DataTlb-Hits", idCpu);
    + #endif
    +         STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbMisses,       STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    +                         "Code TLB misses",            "/IEM/CPU%u/CodeTlb-Misses", idCpu);
    +         STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision,     STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
    +                         "Code TLB revision",          "/IEM/CPU%u/CodeTlb-Revision", idCpu);
    +         STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64,    STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
    +                         "Code TLB physical revision", "/IEM/CPU%u/CodeTlb-PhysRev", idCpu);
    +         STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
    +                         "Code TLB slow read path",    "/IEM/CPU%u/CodeTlb-SlowReads", idCpu);
    +
    +         STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbMisses,       STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    +                         "Data TLB misses",            "/IEM/CPU%u/DataTlb-Misses", idCpu);
    +         STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision,     STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
    +                         "Data TLB revision",          "/IEM/CPU%u/DataTlb-Revision", idCpu);
    +         STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64,    STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
    +                         "Data TLB physical revision", "/IEM/CPU%u/DataTlb-PhysRev", idCpu);

          /*
    …
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
    (r61933 → r62302)

      #define LOG_GROUP LOG_GROUP_PGM_PHYS
      #include <VBox/vmm/pgm.h>
    + #include <VBox/vmm/iem.h>
      #include <VBox/vmm/iom.h>
      #include <VBox/vmm/mm.h>
    …
      HMFlushTLB(pVCpu);
      #endif
    + IEMTlbInvalidateAllPhysical(pVCpu);
      STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
      }
trunk/src/VBox/VMM/include/IEMInternal.h
    (r62289 → r62302)

      #endif

    + //#define IEM_WITH_CODE_TLB    // - work in progress

    …
      /** Pointer to the ring-3 mapping (possibly also valid in ring-0). */
      #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    - R3PTRTYPE(uint8_t *) pMappingR3;
    + R3PTRTYPE(uint8_t *) pbMappingR3;
      #else
    - R3R0PTRTYPE(uint8_t *) pMappingR3;
    + R3R0PTRTYPE(uint8_t *) pbMappingR3;
      #endif
      #if HC_ARCH_BITS == 32
    …
      /** TLB misses. */
      uint32_t cTlbMisses;
    + /** Slow read path.  */
    + uint32_t cTlbSlowReadPath;
    + #if 0
      /** TLB misses because of tag mismatch. */
      uint32_t cTlbMissesTag;
    …
      /** TLB misses because no r3(/r0) mapping. */
      uint32_t cTlbMissesMapping;
    + #endif
      /** Alignment padding. */
    - uint32_t au32Padding[3];
    + uint32_t au32Padding[3+5];
      } IEMTLB;
      AssertCompileSizeAlignment(IEMTLB, 64);
    …
       * This takes the CS segment limit into account. */
      uint16_t cbInstrBufTotal;       /* 0x24 */
    - /** Offset into pbInstrBuf of the first byte of the current instruction. */
    - uint16_t offCurInstrStart;      /* 0x26 */
    + /** Offset into pbInstrBuf of the first byte of the current instruction.
    +  * Can be negative to efficiently handle cross page instructions. */
    + int16_t  offCurInstrStart;      /* 0x26 */

      /** The prefix mask (IEM_OP_PRF_XXX). */
    …
      /** @def Gets the instruction length. */
      #ifdef IEM_WITH_CODE_TLB
    - # define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(a_pVCpu)->iem.s.offCurInstrStart)
    + # define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
      #else
      # define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode)
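The switch from uint16_t to int16_t for offCurInstrStart is what makes cross-page instructions cheap: once the instruction buffer is re-pointed at the following page, the first byte of the current instruction lies before the buffer, i.e. at a negative offset. A small worked example with illustrative values (not taken from the changeset):

    /* Worked example: an instruction starting 3 bytes before a page boundary and
       continuing 4 bytes into the next page, after the buffer now maps the next page. */
    int16_t  offCurInstrStart = -3;   /* start of the instruction, relative to pbInstrBuf */
    uint32_t offInstrNextByte = 4;    /* next byte to fetch, relative to pbInstrBuf       */

    /* The (int32_t) cast sign-extends before the unsigned subtraction, so the macro
       yields 4 - (-3) = 7, the true instruction length.  With the old uint16_t field
       the same situation would have stored 0xFFFD and produced a nonsensical length,
       which is why both the field type and IEM_GET_INSTR_LEN() were changed together. */
    uint32_t cbInstr = offInstrNextByte - (uint32_t)(int32_t)offCurInstrStart;   /* = 7 */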