Changeset 62189 in vbox for trunk/src/VBox/VMM
- Timestamp: Jul 12, 2016 12:05:16 PM (8 years ago)
- File: trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (1 edited)
Legend:
- Unmodified: no prefix
- Added: '+'
- Removed: '-'
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r62171 → r62189)
@@ -1302 +1302 @@
 
 
+/**
+ * Invalidates the IEM TLBs.
+ *
+ * This is called internally as well as by PGM when moving GC mappings.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling
+ *                      thread.
+ * @param   fVmm        Set when PGM calls us with a remapping.
+ */
+void IEMInvalidTLBs(PVMCPU pVCpu, bool fVmm)
+{
+#ifdef IEM_WITH_CODE_TLB
+    pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
+    if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
+    { /* very likely */ }
+    else
+    {
+        pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
+        unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
+        while (i-- > 0)
+            pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
+    }
+#endif
+
+#ifdef IEM_WITH_DATA_TLB
+    pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
+    if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
+    { /* very likely */ }
+    else
+    {
+        pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
+        unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
+        while (i-- > 0)
+            pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
+    }
+#endif
+    NOREF(pVCpu); NOREF(fVmm);
+}
+
+
+/**
+ * Invalidates the host physical aspects of the IEM TLBs.
+ *
+ * This is called internally as well as by PGM when moving GC mappings.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling
+ *                      thread.
+ * @param   uTlbPhysRev The new physical TLB revision value.
+ * @param   fFullFlush  Whether to also drop the cached ring-3 mappings.
+ */
+void IEMInvalidTLBsHostPhys(PVMCPU pVCpu, uint64_t uTlbPhysRev, bool fFullFlush)
+{
+#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
+    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
+
+    pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
+    pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
+
+    if (!fFullFlush)
+    { /* very likely */ }
+    else
+    {
+        unsigned i;
+# ifdef IEM_WITH_CODE_TLB
+        i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
+        while (i-- > 0)
+        {
+            pVCpu->iem.s.CodeTlb.aEntries[i].pMappingR3       = NULL;
+            pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
+        }
+# endif
+# ifdef IEM_WITH_DATA_TLB
+        i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
+        while (i-- > 0)
+        {
+            pVCpu->iem.s.DataTlb.aEntries[i].pMappingR3       = NULL;
+            pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
+        }
+# endif
+    }
+#endif
+    NOREF(pVCpu); NOREF(uTlbPhysRev); NOREF(fFullFlush);
+}
+
+
 #ifdef IEM_WITH_CODE_TLB
 
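The cheap-flush trick in the hunk above is worth spelling out: each entry's tag embeds the TLB revision in its upper bits, so bumping uTlbRevision makes every cached tag mismatch at once, and entries only need scrubbing on the rare counter wrap. A minimal standalone sketch of the idea follows; the entry count and revision increment are hypothetical stand-ins for the real IEM definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TLB_ENTRIES       256                              /* hypothetical entry count */
#define TLB_REVISION_INCR UINT64_C(0x1000000000000000)     /* hypothetical increment, above any page number bit */

typedef struct TLBENTRY { uint64_t uTag; } TLBENTRY;
typedef struct TLB { uint64_t uTlbRevision; TLBENTRY aEntries[TLB_ENTRIES]; } TLB;

/* Tag = page number | current revision; an entry only hits while its
   revision bits match, so bumping the revision invalidates everything. */
static uint64_t tlbTag(TLB const *pTlb, uint64_t uVirtAddr)
{
    return (uVirtAddr >> 12) | pTlb->uTlbRevision;
}

static void tlbInvalidateAll(TLB *pTlb)
{
    pTlb->uTlbRevision += TLB_REVISION_INCR;
    if (pTlb->uTlbRevision != 0)
    { /* very likely: O(1) flush, no array walk */ }
    else
    {
        /* Revision wrapped to zero: tags from the previous cycle could
           match again, so scrub the entries this one time. */
        pTlb->uTlbRevision = TLB_REVISION_INCR;
        unsigned i = TLB_ENTRIES;
        while (i-- > 0)
            pTlb->aEntries[i].uTag = 0;
    }
}

int main(void)
{
    TLB Tlb;
    memset(&Tlb, 0, sizeof(Tlb));
    Tlb.uTlbRevision = TLB_REVISION_INCR;

    uint64_t uAddr = UINT64_C(0x00007fff12345000);
    Tlb.aEntries[0].uTag = tlbTag(&Tlb, uAddr);     /* insert an entry */
    printf("hit before flush: %d\n", Tlb.aEntries[0].uTag == tlbTag(&Tlb, uAddr));
    tlbInvalidateAll(&Tlb);                         /* constant-time flush */
    printf("hit after flush:  %d\n", Tlb.aEntries[0].uTag == tlbTag(&Tlb, uAddr));
    return 0;
}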
1388 1473 */ 1389 1474 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision; … … 1392 1477 if (pTlbe->uTag == uTag) 1393 1478 { 1394 1395 } 1396 1397 1479 /* likely when executing lots of code, otherwise unlikely */ 1480 # ifdef VBOX_WITH_STATISTICS 1481 pVCpu->iem.s.CodeTlb.cTlbHits++; 1482 # endif 1483 } 1484 else 1485 { 1486 pVCpu->iem.s.CodeTlb.cTlbMisses++; 1487 pVCpu->iem.s.CodeTlb.cTlbMissesTag++; 1488 # ifdef VBOX_WITH_RAW_MODE_NOT_R0 1489 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip)) 1490 { 1491 pTlbe->uTag = uTag; 1492 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER 1493 | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3; 1494 pTlbe->GCPhys = NIL_RTGCPHYS; 1495 pTlbe->pMappingR3 = NULL; 1496 } 1497 else 1498 # endif 1499 { 1500 RTGCPHYS GCPhys; 1501 uint64_t fFlags; 1502 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys); 1503 if (RT_FAILURE(rc)) 1504 { 1505 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc)); 1506 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc); 1507 } 1508 1509 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1); 1510 pTlbe->uTag = uTag; 1511 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX); 1512 pTlbe->GCPhys = GCPhys; 1513 pTlbe->pMappingR3 = NULL; 1514 } 1515 } 1398 1516 1399 1517 /* 1400 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap. 1401 * 1402 * First translate CS:rIP to a physical address. 1518 * Check TLB access flags. 1403 1519 */ 1404 # if 0 /** @todo later */ 1405 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin); 1406 uint32_t cbToTryRead; 1407 RTGCPTR GCPtrNext; 1408 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 1409 { 1410 cbToTryRead = PAGE_SIZE; 1411 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode; 1412 if (!IEM_IS_CANONICAL(GCPtrNext)) 1413 return iemRaiseGeneralProtectionFault0(pVCpu); 1414 } 1520 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC)) 1521 { 1522 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3) 1523 { 1524 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst)); 1525 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1526 } 1527 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE)) 1528 { 1529 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst)); 1530 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1531 } 1532 } 1533 1534 # ifdef VBOX_WITH_RAW_MODE_NOT_R0 1535 /* 1536 * Allow interpretation of patch manager code blocks since they can for 1537 * instance throw #PFs for perfectly good reasons. 1538 */ 1539 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE)) 1540 { /* no unlikely */ } 1415 1541 else 1416 1542 { 1417 uint32_t GCPtrNext32 = pCtx->eip; 1418 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); 1419 GCPtrNext32 += pVCpu->iem.s.cbOpcode; 1420 if (GCPtrNext32 > pCtx->cs.u32Limit) 1421 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); 1422 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1; 1423 if (!cbToTryRead) /* overflowed */ 1424 { 1425 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX); 1426 cbToTryRead = UINT32_MAX; 1427 /** @todo check out wrapping around the code segment. 
@@ -1415 +1533 @@
+
+# ifdef VBOX_WITH_RAW_MODE_NOT_R0
+    /*
+     * Allow interpretation of patch manager code blocks since they can for
+     * instance throw #PFs for perfectly good reasons.
+     */
+    if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
+    { /* not unlikely */ }
     else
     {
-        uint32_t GCPtrNext32 = pCtx->eip;
-        Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
-        GCPtrNext32 += pVCpu->iem.s.cbOpcode;
-        if (GCPtrNext32 > pCtx->cs.u32Limit)
-            return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
-        cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
-        if (!cbToTryRead) /* overflowed */
-        {
-            Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
-            cbToTryRead = UINT32_MAX;
-            /** @todo check out wrapping around the code segment. */
-        }
-        if (cbToTryRead < cbMin - cbLeft)
-            return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
-        GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
-    }
-
-    /* Only read up to the end of the page, and make sure we don't read more
-       than the opcode buffer can hold. */
-    uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
-    if (cbToTryRead > cbLeftOnPage)
-        cbToTryRead = cbLeftOnPage;
-    if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
-        cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
-    /** @todo r=bird: Convert assertion into undefined opcode exception? */
-    Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
+
+    }
+
+# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
+
+# if 0
 
 # ifdef VBOX_WITH_RAW_MODE_NOT_R0
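IEMInvalidTLBsHostPhys in the first hunk applies the same revision idea to the host side: the physical revision lives in the upper bits of each entry's fFlagsAndPhysRev, so a cached ring-3 mapping pointer is only trusted while those bits still equal the TLB's current uTlbPhysRev, and PGM can orphan every cached pointer by bumping the revision. A minimal sketch of that validation, with a hypothetical mask and field layout rather than the real IEM ones:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical layout: low bits hold deny flags, high bits the phys rev. */
#define TLBE_F_PT_NO_EXEC   UINT64_C(0x01)
#define TLBE_F_PT_NO_WRITE  UINT64_C(0x02)
#define TLBE_F_PHYS_REV     UINT64_C(0xffffffffffff0000)
#define TLB_PHYS_REV_INCR   UINT64_C(0x0000000000010000)

typedef struct TLBENTRY
{
    uint64_t uTag;              /* page number | virtual revision      */
    uint64_t fFlagsAndPhysRev;  /* deny flags | physical revision      */
    void    *pMappingR3;        /* cached host mapping of the page     */
} TLBENTRY;

/* The host mapping is only valid while the entry's embedded phys rev
   equals the TLB's current one: one XOR + AND checks it. */
static bool tlbeHostMappingValid(TLBENTRY const *pTlbe, uint64_t uTlbPhysRev)
{
    return pTlbe->pMappingR3 != NULL
        && ((pTlbe->fFlagsAndPhysRev ^ uTlbPhysRev) & TLBE_F_PHYS_REV) == 0;
}

int main(void)
{
    uint64_t uTlbPhysRev = TLB_PHYS_REV_INCR;
    static char abPage[4096];

    TLBENTRY Tlbe;
    Tlbe.uTag             = 0;  /* not exercised in this sketch */
    Tlbe.fFlagsAndPhysRev = TLBE_F_PT_NO_WRITE | uTlbPhysRev;
    Tlbe.pMappingR3       = abPage;

    printf("mapping valid: %d\n", tlbeHostMappingValid(&Tlbe, uTlbPhysRev));
    uTlbPhysRev += TLB_PHYS_REV_INCR;   /* PGM moved mappings: bump the rev */
    printf("mapping valid: %d\n", tlbeHostMappingValid(&Tlbe, uTlbPhysRev));
    return 0;
}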