Changeset 18974 in vbox for trunk/src/VBox/VMM
Timestamp:  Apr 17, 2009, 7:13:31 AM
Location:   trunk/src/VBox/VMM
Files:      4 edited
trunk/src/VBox/VMM/PGM.cpp
r18950 → r18974: the registration of the ring-0 dynamic-mapping statistics moves from the per-VCPU loop to the VM-wide section, so the counters are registered once under "/PGM/R0/..." instead of once per virtual CPU under "/PGM/CPU%d/R0/...".

         PGM_REG_COUNTER(&pPGM->StatRCInvlPgSyncMonCR3, "/PGM/RC/InvlPgSyncMonitorCR3", "Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3.");

    +    /* R0 only: */
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapMigrateInvlPg,      "/PGM/R0/DynMapMigrateInvlPg",          "invlpg count in PGMDynMapMigrateAutoSet.");
    +    PGM_REG_PROFILE(&pPGM->StatR0DynMapGCPageInl,          "/PGM/R0/DynMapPageGCPageInl",          "Calls to pgmR0DynMapGCPageInlined.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlHits,      "/PGM/R0/DynMapPageGCPageInl/Hits",     "Hash table lookup hits.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlMisses,    "/PGM/R0/DynMapPageGCPageInl/Misses",   "Misses that falls back to code common with PGMDynMapHCPage.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlRamHits,   "/PGM/R0/DynMapPageGCPageInl/RamHits",  "1st ram range hits.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlRamMisses, "/PGM/R0/DynMapPageGCPageInl/RamMisses","1st ram range misses, takes slow path.");
    +    PGM_REG_PROFILE(&pPGM->StatR0DynMapHCPageInl,          "/PGM/R0/DynMapPageHCPageInl",          "Calls to pgmR0DynMapHCPageInlined.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapHCPageInlHits,      "/PGM/R0/DynMapPageHCPageInl/Hits",     "Hash table lookup hits.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapHCPageInlMisses,    "/PGM/R0/DynMapPageHCPageInl/Misses",   "Misses that falls back to code common with PGMDynMapHCPage.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapPage,               "/PGM/R0/DynMapPage",                   "Calls to pgmR0DynMapPage");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapSetOptimize,        "/PGM/R0/DynMapPage/SetOptimize",       "Calls to pgmDynMapOptimizeAutoSet.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapSetSearchFlushes,   "/PGM/R0/DynMapPage/SetSearchFlushes",  "Set search restorting to subset flushes.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapSetSearchHits,      "/PGM/R0/DynMapPage/SetSearchHits",     "Set search hits.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapSetSearchMisses,    "/PGM/R0/DynMapPage/SetSearchMisses",   "Set search misses.");
    +    PGM_REG_PROFILE(&pPGM->StatR0DynMapHCPage,             "/PGM/R0/DynMapPage/HCPage",            "Calls to PGMDynMapHCPage (ring-0).");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapPageHits0,          "/PGM/R0/DynMapPage/Hits0",             "Hits at iPage+0");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapPageHits1,          "/PGM/R0/DynMapPage/Hits1",             "Hits at iPage+1");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapPageHits2,          "/PGM/R0/DynMapPage/Hits2",             "Hits at iPage+2");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapPageInvlPg,         "/PGM/R0/DynMapPage/InvlPg",            "invlpg count in pgmR0DynMapPageSlow.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlow,           "/PGM/R0/DynMapPage/Slow",              "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlowLoopHits,   "/PGM/R0/DynMapPage/SlowLoopHits",      "Hits in the loop path.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlowLoopMisses, "/PGM/R0/DynMapPage/SlowLoopMisses",    "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");
    +    //PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlowLostHits, "/PGM/R0/DynMapPage/SlowLostHits",      "Lost hits.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapSubsets,            "/PGM/R0/Subsets",                      "Times PGMDynMapPushAutoSubset was called.");
    +    PGM_REG_COUNTER(&pPGM->StatR0DynMapPopFlushes,         "/PGM/R0/SubsetPopFlushes",             "Times PGMDynMapPopAutoSubset flushes the subset.");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[0],        "/PGM/R0/SetSize000..09",               "00-09% filled");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[1],        "/PGM/R0/SetSize010..19",               "10-19% filled");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[2],        "/PGM/R0/SetSize020..29",               "20-29% filled");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[3],        "/PGM/R0/SetSize030..39",               "30-39% filled");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[4],        "/PGM/R0/SetSize040..49",               "40-49% filled");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[5],        "/PGM/R0/SetSize050..59",               "50-59% filled");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[6],        "/PGM/R0/SetSize060..69",               "60-69% filled");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[7],        "/PGM/R0/SetSize070..79",               "70-79% filled");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[8],        "/PGM/R0/SetSize080..89",               "80-89% filled");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[9],        "/PGM/R0/SetSize090..99",               "90-99% filled");
    +    PGM_REG_COUNTER(&pPGM->aStatR0DynMapSetSize[10],       "/PGM/R0/SetSize100",                   "100% filled");

     # ifdef PGMPOOL_WITH_GCPHYS_TRACKING
         PGM_REG_COUNTER(&pPGM->StatTrackVirgin, "/PGM/Track/Virgin", "The number of first time shadowings");
    [...]
         STAMR3RegisterF(pVM, &pPGM->StatSyncPagePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
                         "The number of SyncPage per PD n.", "/PGM/CPU%d/PDSyncPage/%04X", i, j);

    -    /* R0 only: */
    -    PGM_REG_COUNTER(&pPGM->StatR0DynMapMigrateInvlPg,      "/PGM/CPU%d/R0/DynMapMigrateInvlPg",    "invlpg count in PGMDynMapMigrateAutoSet.");
    [... the remaining removed registrations mirror the block added above, using "/PGM/CPU%d/R0/..." statistics names and identical descriptions ...]

         /* RZ only: */
trunk/src/VBox/VMM/PGMInternal.h
r18959 → r18974: the AutoSet mapping set and the StatR0DynMap* statistics move from the per-VCPU PGMCPU structure into the shared PGM structure, and the inline ring-0 mapping helpers are updated to match.

Struct PGM gains the mapping set:

         SUPPAGINGMODE               enmHostMode;

    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    +    /** Automatically tracked physical memory mapping set.
    +     * Ring-0 and strict raw-mode builds. */
    +    PGMMAPSET                   AutoSet;
    +#endif
    +
         /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */
         RTGCPHYS                    GCPhys4MBPSEMask;

The "R0 only" statistics members (StatR0DynMapMigrateInvlPg through aStatR0DynMapSetSize[11]) and the PGMPOOL_WITH_GCPHYS_TRACKING counters now sit at the end of struct PGM, directly after StatRCInvlPgSyncMonCR3. Struct PGMCPU loses its AutoSet member and the R0-only statistics; its remaining members (offVM/offVCpu/offPGM, the A20 state, the shadow/guest paging modes, the guest paging pointers, the mode-specific function pointer tables, the release statistics and the per-CPU SyncPT/SyncPage counters) are unchanged, which is why the Trac rendering shows the whole block between "} PGM;" and the per-CPU statistics as removed and re-added.

The inline helpers no longer need to locate the per-CPU data:

     DECLINLINE(int) pgmR0DynMapHCPageInlined(PPGM pPGM, RTHCPHYS HCPhys, void **ppv)
     {
    -    PVM         pVM     = PGM2VM(pPGM);
    -    PPGMCPU     pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */
    -    PPGMMAPSET  pSet    = &pPGMCPU->AutoSet;
    -
    -    STAM_PROFILE_START(&pPGMCPU->StatR0DynMapHCPageInl, a);
    +    PVM         pVM     = PGM2VM(pPGM);
    +    PPGMMAPSET  pSet    = &pPGM->AutoSet;
    +
    +    STAM_PROFILE_START(&pPGM->StatR0DynMapHCPageInl, a);
         Assert(!(HCPhys & PAGE_OFFSET_MASK));
         Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
    [...]
    -        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlHits);
    +        STAM_COUNTER_INC(&pPGM->StatR0DynMapHCPageInlHits);
    [...]
    -    STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapHCPageInl, a);
    +    STAM_PROFILE_STOP(&pPGM->StatR0DynMapHCPageInl, a);
         return VINF_SUCCESS;
     }

pgmR0DynMapGCPageInlined receives the same treatment: pSet now comes from pPGM->AutoSet, and the StatR0DynMapGCPageInl profile together with the RamHits/RamMisses/Hits/Misses counters is taken from pPGM instead of pPGMCPU before falling back to pgmR0DynMapHCPageCommon.
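The simplification in the inline helpers is that the mapping set is reached with a plain member access on data the caller already has, instead of first resolving the current CPU's block from an offset. The sketch below models that before/after lookup pattern with invented stand-in types; none of these names are the real VirtualBox definitions.

    /* Sketch of the lookup change, using stand-in types only. */
    #include <stdint.h>
    #include <stddef.h>

    typedef struct DEMOMAPSET { uint32_t cEntries; } DEMOMAPSET;

    typedef struct DEMOPGMCPU { DEMOMAPSET AutoSet; } DEMOPGMCPU;   /* per-VCPU data   */
    typedef struct DEMOVCPU   { int iCpu; DEMOPGMCPU pgm; } DEMOVCPU;

    typedef struct DEMOPGM                                          /* shared VM data  */
    {
        DEMOMAPSET AutoSet;       /* after r18974: the set lives here                  */
        uintptr_t  offVCpuPgm;    /* before: offset used to locate the per-CPU block   */
    } DEMOPGM;

    /* Old pattern: resolve the current CPU's block, then take its member. */
    static DEMOMAPSET *demoGetSetOld(DEMOPGM *pPGM, uint8_t *pbCurVCpu)
    {
        DEMOPGMCPU *pPGMCpu = (DEMOPGMCPU *)(pbCurVCpu + pPGM->offVCpuPgm);
        return &pPGMCpu->AutoSet;
    }

    /* New pattern: a single member access on the shared structure. */
    static DEMOMAPSET *demoGetSetNew(DEMOPGM *pPGM)
    {
        return &pPGM->AutoSet;
    }

    int main(void)
    {
        DEMOVCPU VCpu = { 0, { { 0 } } };
        DEMOPGM  Pgm  = { { 0 }, offsetof(DEMOVCPU, pgm) };
        DEMOMAPSET *pOld = demoGetSetOld(&Pgm, (uint8_t *)&VCpu);
        DEMOMAPSET *pNew = demoGetSetNew(&Pgm);
        return (pOld->cEntries + pNew->cEntries) ? 1 : 0;
    }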
trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp
r18955 → r18974: every user of the auto set and of its statistics switches from the per-VCPU data to the shared VM structure.

pgmR0DynMapPageSlow and pgmR0DynMapPage drop the VMMGetCpu() lookup that was only needed for statistics and bump the counters on pVM instead:

     static uint32_t pgmR0DynMapPageSlow(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVM pVM)
     {
    -#ifdef VBOX_WITH_STATISTICS
    -    PVMCPU pVCpu = VMMGetCpu(pVM);
    -#endif
    -    STAM_COUNTER_INC(&pVCpu->pgm.s.StatR0DynMapPageSlow);
    +    STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapPageSlow);

The remaining increments of StatR0DynMapPageSlowLoopHits, StatR0DynMapPageSlowLoopMisses, StatR0DynMapPageSlowLostHits, StatR0DynMapPage, StatR0DynMapPageHits0/1/2 and StatR0DynMapPageInvlPg change from pVCpu->pgm.s to pVM->pgm.s in the same way.

The public auto-set API now takes the VM handle instead of a virtual CPU; PGMDynMapStartAutoSet is representative:

    - * @param   pVCpu       The shared data for the current virtual CPU.
    + * @param   pVM         Pointer to the shared VM structure.
      */
    -VMMDECL(void) PGMDynMapStartAutoSet(PVMCPU pVCpu)
    +VMMDECL(void) PGMDynMapStartAutoSet(PVM pVM)
     {
    -    Assert(pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED);
    -    Assert(pVCpu->pgm.s.AutoSet.iSubset == UINT32_MAX);
    -    pVCpu->pgm.s.AutoSet.cEntries = 0;
    -    pVCpu->pgm.s.AutoSet.iCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
    +    Assert(pVM->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED);
    +    Assert(pVM->pgm.s.AutoSet.iSubset == UINT32_MAX);
    +    pVM->pgm.s.AutoSet.cEntries = 0;
    +    pVM->pgm.s.AutoSet.iCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
     }

PGMDynMapReleaseAutoSet, PGMDynMapFlushAutoSet, PGMDynMapMigrateAutoSet, PGMDynMapPushAutoSubset and PGMDynMapPopAutoSubset get the same PVMCPU-to-PVM signature change, take pSet from pVM->pgm.s.AutoSet, and update StatR0DynMapSubsets, StatR0DynMapPopFlushes, StatR0DynMapMigrateInvlPg and the aStatR0DynMapSetSize[] fill-level counters through pVM. pgmR0DynMapHCPageCommon likewise drops its VMMGetCpu() statistics lookup and counts the set-search hits, misses, flushes and optimizations on pVM, and PGMDynMapHCPage removes the "PVMCPU pVCpu = VMMGetCpu(pVM)" lookup altogether, fetching the set from pVM->pgm.s.AutoSet and profiling StatR0DynMapHCPage on pVM.
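Several of the touched call sites use the expression (cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11 to pick one of the eleven aStatR0DynMapSetSize[] buckets. A worked example of that bucketing is below; the capacity of 64 is an assumption made purely for illustration, since the real value comes from the PGMMAPSET definition.

    /* Worked example of the fill-level bucketing used for aStatR0DynMapSetSize[11]:
     * buckets 0..9 correspond to 0-9% .. 90-99% filled, bucket 10 to 100%. */
    #include <stdio.h>

    #define DEMO_SET_CAPACITY 64u   /* stand-in for RT_ELEMENTS(pSet->aEntries) */

    static unsigned demoFillBucket(unsigned cEntries)
    {
        /* Same expression as in the changeset. */
        return (cEntries * 10u / DEMO_SET_CAPACITY) % 11u;
    }

    int main(void)
    {
        unsigned aSamples[] = { 0, 5, 32, 63, 64 };
        for (unsigned i = 0; i < sizeof(aSamples) / sizeof(aSamples[0]); i++)
            printf("cEntries=%2u -> bucket %u\n", aSamples[i], demoFillBucket(aSamples[i]));
        return 0;   /* prints buckets 0, 0, 5, 9 and 10 for the samples above */
    }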
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
r18956 → r18974: the structure-offset testcase follows the member move.

         GEN_CHECK_OFF(PGMCPU, offVCpu);
         GEN_CHECK_OFF(PGMCPU, offPGM);
    -    GEN_CHECK_OFF(PGMCPU, AutoSet);
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    +    GEN_CHECK_OFF(PGM, AutoSet);
    +#endif
         GEN_CHECK_OFF(PGMCPU, GCPhysA20Mask);
         GEN_CHECK_OFF(PGMCPU, fA20Enabled);
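GEN_CHECK_OFF is part of the VMM structure-layout testcases, which make sure the different compilation contexts agree on member offsets; since AutoSet now lives in PGM rather than PGMCPU, the check has to follow it. The snippet below is only a loose, hypothetical analogue of such an offset check using a C11 static assertion, not how tstVMStructGC.cpp actually works, and every name in it is a stand-in.

    /* Hypothetical illustration of a layout check: compilation fails if the
     * member drifts away from the expected offset. */
    #include <stddef.h>

    typedef struct DEMOPGM
    {
        unsigned uFirst;
        unsigned AutoSet;   /* stand-in member; the real AutoSet is a PGMMAPSET */
    } DEMOPGM;

    _Static_assert(offsetof(DEMOPGM, AutoSet) == sizeof(unsigned),
                   "DEMOPGM::AutoSet is not at the expected offset");

    int main(void)
    {
        return 0;
    }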