Changeset 73073 in vbox
- Timestamp: Jul 11, 2018, 4:19:48 PM
- svn:sync-xref-src-repo-rev: 123634
- Location: trunk
- Files: 5 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/include/VBox/vmm/vm.h
r72569 → r73073

 #endif
 
+/** @def VMSTATE_IS_RUNNING
+ * Checks if the given state indicates a running VM.
+ */
+#define VMSTATE_IS_RUNNING(a_enmVMState) \
+    (   (enmVMState) == VMSTATE_RUNNING \
+     || (enmVMState) == VMSTATE_RUNNING_LS \
+     || (enmVMState) == VMSTATE_RUNNING_FT )
+
 /** @def VM_IS_RUNNING_FOR_ASSERTIONS_ONLY
- * Checks if the the VM is running.
- * @note Thie is only for pure debug assertions. No AssertReturn or similar!
+ * Checks if the VM is running.
+ * @note This is only for pure debug assertions. No AssertReturn or similar!
+ * @sa VMSTATE_IS_RUNNING
  */
 #define VM_IS_RUNNING_FOR_ASSERTIONS_ONLY(pVM) \
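The PGMDbg.cpp hunk further down uses the new macro to pick how often the scan loop yields the PGM lock (every 4096 iterations while the VM is running, every 65536 otherwise). Below is a minimal standalone sketch of that use; the VMSTATE enum values are simplified stand-ins, not the real VBox definitions. Note that the committed macro body tests enmVMState rather than its a_enmVMState parameter, so it only expands to working code when a variable literally named enmVMState is in scope, as it is in PGMDbg.cpp and in this sketch.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the VBox VMSTATE values (assumption, not the real enum). */
    typedef enum { VMSTATE_RUNNING, VMSTATE_RUNNING_LS, VMSTATE_RUNNING_FT, VMSTATE_SUSPENDED } VMSTATE;

    /* The macro as added by this changeset. */
    #define VMSTATE_IS_RUNNING(a_enmVMState) \
        (   (enmVMState) == VMSTATE_RUNNING \
         || (enmVMState) == VMSTATE_RUNNING_LS \
         || (enmVMState) == VMSTATE_RUNNING_FT )

    int main(void)
    {
        VMSTATE enmVMState = VMSTATE_SUSPENDED;
        /* Same pattern as the PGMDbg.cpp hunk below: yield the PGM lock less often
           (i.e. scan in larger bursts) when the VM is not running. */
        uint32_t const cYieldCountDownReload = VMSTATE_IS_RUNNING(enmVMState) ? 4096 : 65536;
        printf("yield count-down reload: %u\n", (unsigned)cYieldCountDownReload);
        return 0;
    }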
trunk/include/iprt/x86.h
r72131 → r73073

 #define X86_PAGE_4M_BASE_MASK_32    0xffc00000U
 
+/** The size of a 1GB page. */
+#define X86_PAGE_1G_SIZE            _1G
+/** The page shift of a 1GB page. */
+#define X86_PAGE_1G_SHIFT           30
+/** The 1GB page offset mask. */
+#define X86_PAGE_1G_OFFSET_MASK     0x3fffffff
+/** The 1GB page base mask for virtual addresses. */
+#define X86_PAGE_1G_BASE_MASK       UINT64_C(0xffffffffc0000000)
+
 /**
  * Check if the given address is canonical.
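The new constants follow the existing 2M/4M page macros: the size is 1 shifted by the page shift, the offset mask is size minus one, and the base mask selects the 1GB-aligned portion of a virtual address (pgmGstPtWalkNext() below uses X86_PAGE_1G_BASE_MASK to tell whether two addresses fall in the same 1GB page). A small standalone sketch of how they relate, redefining the macros locally instead of pulling in iprt/x86.h and expressing _1G as a shift:

    #include <inttypes.h>
    #include <stdio.h>

    /* Local copies of the new macros; iprt/x86.h defines X86_PAGE_1G_SIZE as _1G,
       the shift-based definition here is an equivalent stand-in. */
    #define X86_PAGE_1G_SHIFT        30
    #define X86_PAGE_1G_SIZE         (UINT64_C(1) << X86_PAGE_1G_SHIFT)
    #define X86_PAGE_1G_OFFSET_MASK  UINT64_C(0x3fffffff)
    #define X86_PAGE_1G_BASE_MASK    UINT64_C(0xffffffffc0000000)

    int main(void)
    {
        uint64_t const GCPtr = UINT64_C(0x000000007654321f);    /* arbitrary example address */
        printf("1G page size:   %#" PRIx64 "\n", (uint64_t)X86_PAGE_1G_SIZE);
        printf("1G page base:   %#" PRIx64 "\n", GCPtr & X86_PAGE_1G_BASE_MASK);   /* 0x40000000 */
        printf("offset in page: %#" PRIx64 "\n", GCPtr & X86_PAGE_1G_OFFSET_MASK); /* 0x3654321f */
        return 0;
    }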
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r72265 → r73073

  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
  * @param   GCPtr       The guest virtual address to walk by.
- * @param   pWalk       Where to return the walk result. This is valid on some
+ * @param   pWalk       Where to return the walk result. This is valid for some
  *                      error codes as well.
  */

…

             return VERR_PGM_NOT_USED_IN_MODE;
     }
+}
+
+
+/**
+ * Tries to continue the previous walk.
+ *
+ * @note    Requires the caller to hold the PGM lock from the first
+ *          pgmGstPtWalk() call to the last pgmGstPtWalkNext() call.  Otherwise
+ *          we cannot use the pointers.
+ *
+ * @returns VBox status code.
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
+ * @retval  VERR_PGM_NOT_USED_IN_MODE if not paging isn't enabled. @a pWalk is
+ *          not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   GCPtr       The guest virtual address to walk by.
+ * @param   pWalk       Pointer to the previous walk result and where to return
+ *                      the result of this walk.  This is valid for some error
+ *                      codes as well.
+ */
+int pgmGstPtWalkNext(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
+{
+    /*
+     * We can only handle successfully walks.
+     * We also limit ourselves to the next page.
+     */
+    if (   pWalk->u.Core.fSucceeded
+        && GCPtr - pWalk->u.Core.GCPtr == PAGE_SIZE)
+    {
+        Assert(pWalk->u.Core.uLevel == 0);
+        if (pWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
+        {
+            /*
+             * AMD64
+             */
+            if (!pWalk->u.Core.fGigantPage && !pWalk->u.Core.fBigPage)
+            {
+                /*
+                 * We fall back to full walk if the PDE table changes, if any
+                 * reserved bits are set, or if the effective page access changes.
+                 */
+                const uint64_t fPteSame = X86_PTE_P   | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
+                                        | X86_PTE_PCD | X86_PTE_A  | X86_PTE_PAE_NX;
+                const uint64_t fPdeSame = X86_PDE_P   | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
+                                        | X86_PDE_PCD | X86_PDE_A  | X86_PDE_PAE_NX | X86_PDE_PS;
+
+                if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PD_PAE_SHIFT))
+                {
+                    if (pWalk->u.Amd64.pPte)
+                    {
+                        X86PTEPAE Pte;
+                        Pte.u = pWalk->u.Amd64.pPte[1].u;
+                        if (   (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
+                            && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
+                        {
+
+                            pWalk->u.Core.GCPtr  = GCPtr;
+                            pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
+                            pWalk->u.Amd64.Pte.u = Pte.u;
+                            pWalk->u.Amd64.pPte++;
+                            return VINF_SUCCESS;
+                        }
+                    }
+                }
+                else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PDPT_SHIFT))
+                {
+                    Assert(!((GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK)); /* Must be first PT entry. */
+                    if (pWalk->u.Amd64.pPde)
+                    {
+                        X86PDEPAE Pde;
+                        Pde.u = pWalk->u.Amd64.pPde[1].u;
+                        if (   (Pde.u & fPdeSame) == (Pde.u & fPdeSame)
+                            && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
+                        {
+                            /* Get the new PTE and check out the first entry. */
+                            int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
+                                                               &pWalk->u.Amd64.pPt);
+                            if (RT_SUCCESS(rc))
+                            {
+                                pWalk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];
+                                X86PTEPAE Pte;
+                                Pte.u = pWalk->u.Amd64.pPte->u;
+                                if (   (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
+                                    && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
+                                {
+                                    pWalk->u.Core.GCPtr  = GCPtr;
+                                    pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
+                                    pWalk->u.Amd64.Pte.u = Pte.u;
+                                    pWalk->u.Amd64.Pde.u = Pde.u;
+                                    pWalk->u.Amd64.pPde++;
+                                    return VINF_SUCCESS;
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            else if (!pWalk->u.Core.fGigantPage)
+            {
+                if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))
+                {
+                    pWalk->u.Core.GCPtr   = GCPtr;
+                    pWalk->u.Core.GCPhys += PAGE_SIZE;
+                    return VINF_SUCCESS;
+                }
+            }
+            else
+            {
+                if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))
+                {
+                    pWalk->u.Core.GCPtr   = GCPtr;
+                    pWalk->u.Core.GCPhys += PAGE_SIZE;
+                    return VINF_SUCCESS;
+                }
+            }
+        }
+    }
+    /* Case we don't handle.  Do full walk. */
+    return pgmGstPtWalk(pVCpu, GCPtr, pWalk);
 }
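The new pgmGstPtWalkNext() only takes the fast path when the requested address is exactly one page past the previous walk and the cached table entries still look valid; everything else falls back to pgmGstPtWalk(). The following standalone toy model (not VBox code: a single made-up leaf table, no upper levels, no locking, no permission checks) sketches that walk/walk-next pattern under those assumptions.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)
    #define PT_ENTRIES 16u                 /* toy leaf table: 16 pages */

    static uint64_t g_aPt[PT_ENTRIES];     /* toy leaf page table */

    typedef struct { uint64_t GCPtr; uint64_t GCPhys; const uint64_t *pPte; } WALK;

    static void walkFull(uint64_t GCPtr, WALK *pWalk)
    {
        /* The real pgmGstPtWalk() resolves PML4, PDPT and PD first; this toy model
           has a single leaf table, so the "full" walk is just the final lookup. */
        pWalk->pPte   = &g_aPt[(GCPtr >> PAGE_SHIFT) & (PT_ENTRIES - 1)];
        pWalk->GCPhys = *pWalk->pPte;
        pWalk->GCPtr  = GCPtr;
    }

    static void walkNext(uint64_t GCPtr, WALK *pWalk)
    {
        /* Fast path: exactly the next page and still inside the same leaf table,
           so the cached PTE pointer from the previous walk can simply be advanced. */
        if (   GCPtr - pWalk->GCPtr == PAGE_SIZE
            && ((GCPtr >> PAGE_SHIFT) & (PT_ENTRIES - 1)) != 0)
        {
            pWalk->pPte++;
            pWalk->GCPhys = *pWalk->pPte;
            pWalk->GCPtr  = GCPtr;
            return;
        }
        walkFull(GCPtr, pWalk);            /* anything else falls back to a full walk */
    }

    int main(void)
    {
        for (unsigned i = 0; i < PT_ENTRIES; i++)
            g_aPt[i] = (uint64_t)(0x100 + i) << PAGE_SHIFT;   /* made-up mappings */

        WALK Walk;
        walkFull(0, &Walk);                                   /* first page: full walk */
        for (uint64_t GCPtr = PAGE_SIZE; GCPtr < 4 * PAGE_SIZE; GCPtr += PAGE_SIZE)
        {
            walkNext(GCPtr, &Walk);                           /* continuation walks */
            printf("%#08llx -> %#llx\n", (unsigned long long)GCPtr, (unsigned long long)Walk.GCPhys);
        }
        return 0;
    }

In the real code the caller must hold the PGM lock across the whole pgmGstPtWalk()/pgmGstPtWalkNext() sequence, which is why the PGMDbg.cpp loop below forces a full walk again whenever it skips ahead or whenever PDMR3CritSectYield() reports that the lock was given up.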
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
r69111 → r73073

     pgmR3DbgSelectMemScanFunction(&pfnMemScan, (uint32_t)GCPtrAlign, cbNeedle);
 
-    uint32_t cYieldCountDown = 4096;
+    VMSTATE        enmVMState             = pVM->enmVMState;
+    uint32_t const cYieldCountDownReload  = VMSTATE_IS_RUNNING(enmVMState) ? 4096 : 65536;
+    uint32_t       cYieldCountDown        = cYieldCountDownReload;
+    RTGCPHYS       GCPhysPrev             = NIL_RTGCPHYS;
+    bool           fFullWalk              = true;
+    PGMPTWALKGST   Walk;
+    RT_ZERO(Walk);
+
     pgmLock(pVM);
     for (;; offPage = 0)
     {
-        PGMPTWALKGST Walk;
-        int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
+        int rc;
+        if (fFullWalk)
+            rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
+        else
+            rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk);
         if (RT_SUCCESS(rc) && Walk.u.Core.fSucceeded)
         {
-            PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.u.Core.GCPhys);
-            if (   pPage
-                && (   !PGM_PAGE_IS_ZERO(pPage)
-                    || fAllZero)
-                && !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
-                && !PGM_PAGE_IS_BALLOONED(pPage))
-            {
-                void const *pvPage;
-                PGMPAGEMAPLOCK Lock;
-                rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, Walk.u.Core.GCPhys, &pvPage, &Lock);
-                if (RT_SUCCESS(rc))
-                {
-                    int32_t offHit = offPage;
-                    bool    fRc;
-                    if (GCPtrAlign < PAGE_SIZE)
-                    {
-                        uint32_t cbSearch = cPages > 0
-                                          ? PAGE_SIZE                          - (uint32_t)offPage
-                                          : (GCPtrLast & PAGE_OFFSET_MASK) + 1 - (uint32_t)offPage;
-                        fRc = pgmR3DbgScanPage((uint8_t const *)pvPage, &offHit, cbSearch, (uint32_t)GCPtrAlign,
-                                               pabNeedle, cbNeedle, pfnMemScan, &abPrev[0], &cbPrev);
-                    }
-                    else
-                        fRc = memcmp(pvPage, pabNeedle, cbNeedle) == 0
-                           && (GCPtrLast - GCPtr) >= cbNeedle;
-                    PGMPhysReleasePageMappingLock(pVM, &Lock);
-                    if (fRc)
-                    {
-                        *pGCPtrHit = GCPtr + offHit;
-                        pgmUnlock(pVM);
-                        return VINF_SUCCESS;
-                    }
-                }
-                else
-                    cbPrev = 0; /* ignore error. */
-            }
-            else
-                cbPrev = 0;
+            fFullWalk = false;
+
+            /* Skip if same page as previous one (W10 optimization). */
+            if (   Walk.u.Core.GCPhys != GCPhysPrev
+                || cbPrev != 0)
+            {
+                PPGMPAGE pPage = pgmPhysGetPage(pVM, Walk.u.Core.GCPhys);
+                if (   pPage
+                    && (   !PGM_PAGE_IS_ZERO(pPage)
+                        || fAllZero)
+                    && !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
+                    && !PGM_PAGE_IS_BALLOONED(pPage))
+                {
+                    GCPhysPrev = Walk.u.Core.GCPhys;
+                    void const *pvPage;
+                    PGMPAGEMAPLOCK Lock;
+                    rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, Walk.u.Core.GCPhys, &pvPage, &Lock);
+                    if (RT_SUCCESS(rc))
+                    {
+                        int32_t offHit = offPage;
+                        bool    fRc;
+                        if (GCPtrAlign < PAGE_SIZE)
+                        {
+                            uint32_t cbSearch = cPages > 0
+                                              ? PAGE_SIZE                          - (uint32_t)offPage
+                                              : (GCPtrLast & PAGE_OFFSET_MASK) + 1 - (uint32_t)offPage;
+                            fRc = pgmR3DbgScanPage((uint8_t const *)pvPage, &offHit, cbSearch, (uint32_t)GCPtrAlign,
+                                                   pabNeedle, cbNeedle, pfnMemScan, &abPrev[0], &cbPrev);
+                        }
+                        else
+                            fRc = memcmp(pvPage, pabNeedle, cbNeedle) == 0
+                               && (GCPtrLast - GCPtr) >= cbNeedle;
+                        PGMPhysReleasePageMappingLock(pVM, &Lock);
+                        if (fRc)
+                        {
+                            *pGCPtrHit = GCPtr + offHit;
+                            pgmUnlock(pVM);
+                            return VINF_SUCCESS;
+                        }
+                    }
+                    else
+                        cbPrev = 0; /* ignore error. */
+                }
+                else
+                    cbPrev = 0;
+            }
+            else
+                cbPrev = 0;
         }
         else

…

             if (cPages <= cPagesCanSkip)
                 break;
+            fFullWalk = true;
             if (cPagesCanSkip >= cIncPages)
             {

…

         if (!--cYieldCountDown)
         {
-            PDMR3CritSectYield(&pVM->pgm.s.CritSectX);
-            cYieldCountDown = 4096;
+            fFullWalk = PDMR3CritSectYield(&pVM->pgm.s.CritSectX);
+            cYieldCountDown = cYieldCountDownReload;
         }
     }
trunk/src/VBox/VMM/include/PGMInternal.h
r70977 → r73073

 int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4);
 int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk);
+int pgmGstPtWalkNext(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk);
 
 # if defined(VBOX_STRICT) && HC_ARCH_BITS == 64 && defined(IN_RING3)