- Timestamp: Aug 7, 2019 8:48:20 AM (5 years ago)
- Location: trunk/src/VBox/VMM
- Files: 6 edited
Legend:
- Unmodified context lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
- Elided ranges are shown as …
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
(r80167 → r80172)

              return rc;
          }
    -
    -#   if defined(LOG_ENABLED) && 0
    -        RTGCPHYS GCPhys2;
    -        uint64_t fPageGst2;
    -        PGMGstGetPage(pVCpu, pvFault, &fPageGst2, &GCPhys2);
    -#    if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    -        Log(("Page out of sync: %RGv eip=%08x PdeSrc.US=%d fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
    -             pvFault, pRegFrame->eip, GstWalk.Pde.n.u1User, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
    -#    else
    -        Log(("Page out of sync: %RGv eip=%08x fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
    -             pvFault, pRegFrame->eip, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
    -#    endif
    -#   endif /* LOG_ENABLED */

    #   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    …
                if (PteSrc.n.u1Present)
                {
    -# ifdef VBOX_WITH_RAW_MODE_NOT_R0
    -                /*
    -                 * Assuming kernel code will be marked as supervisor - and not as user level
    -                 * and executed using a conforming code selector - And marked as readonly.
    -                 * Also assume that if we're monitoring a page, it's of no interest to CSAM.
    -                 */
    -                PPGMPAGE pPage;
    -                if (   ((PdeSrc.u & pPTSrc->a[iPTSrc].u) & (X86_PTE_RW | X86_PTE_US))
    -                    || !CSAMDoesPageNeedScanning(pVM, GCPtrCur)
    -                    || (   (pPage = pgmPhysGetPage(pVM, GST_GET_PTE_GCPHYS(PteSrc)))
    -                        && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
    -                   )
    -# endif
    -                    PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
    +                PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
                    Log2(("SyncPT: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%RGp\n",
                          GCPtrCur,
    …
               && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
               && PGM_SHW_TYPE != PGM_TYPE_NONE
    -
    -# ifdef VBOX_WITH_RAW_MODE_NOT_R0
    -    if (!(fPage & X86_PTE_US))
    -    {
    -        /*
    -         * Mark this page as safe.
    -         */
    -        /** @todo not correct for pages that contain both code and data!! */
    -        Log(("CSAMMarkPage %RGv; scanned=%d\n", GCPtrPage, true));
    -        CSAMMarkPage(pVM, GCPtrPage, true);
    -    }
    -# endif
    -
        /*
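The block removed above only ever fired in raw-mode builds: it skipped SyncPageWorker for supervisor, read-only guest pages that CSAM still wanted to scan and that had no active physical handler. Below is a self-contained sketch of that predicate; the PTE flag values match the real x86 bits, but needs_scanning(), has_active_handler() and the example arguments are stand-ins invented for the sketch, not the VirtualBox API.

    /* Sketch only: mirrors the shape of the removed raw-mode gate around
     * SyncPageWorker.  A PTE is synced unless it is a CSAM scan candidate. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X86_PTE_RW  0x02u   /* bit 1: writable */
    #define X86_PTE_US  0x04u   /* bit 2: user/supervisor */

    static bool needs_scanning(uint64_t gc_ptr)      { (void)gc_ptr;  return true;  }  /* stand-in for CSAMDoesPageNeedScanning */
    static bool has_active_handler(uint64_t gc_phys) { (void)gc_phys; return false; }  /* stand-in for PGM_PAGE_HAS_ACTIVE_HANDLERS */

    static bool should_sync(uint64_t pde, uint64_t pte, uint64_t gc_ptr, uint64_t gc_phys)
    {
        /* Same condition shape as the removed code: writable or user pages,
         * pages CSAM does not care about, and pages with active handlers are
         * synced; only supervisor read-only scan candidates were skipped. */
        return ((pde & pte) & (X86_PTE_RW | X86_PTE_US))
            || !needs_scanning(gc_ptr)
            || has_active_handler(gc_phys);
    }

    int main(void)
    {
        printf("sync? %d\n", should_sync(X86_PTE_RW, X86_PTE_RW, 0xc0000000u, 0x1000u));
        return 0;
    }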
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
(r80163 → r80172)

                rc = rc2;

    -#ifndef IN_RC
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
    …
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
    -#endif
        }
    …
     #endif
            /** @todo do we need this notification? */
    -#if defined(IN_RING3) || defined(IN_RING0)
            NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, GCPhysLast - GCPhysStart + 1,
                                                 fRestoreAsRAM, fRestoreAsRAM2);
    -#else
    -        RT_NOREF_PV(fRestoreAsRAM); /** @todo this needs more work for REM! */
    -        RT_NOREF_PV(fRestoreAsRAM2);
    -#endif
        }
    …
            AssertRC(rc);

    -#ifndef IN_RC
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
    …
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
    -#endif
        }
        else
    …
               || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
        Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
    -#ifndef IN_RC
        RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
    -#endif

        /*
    …
        int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
        AssertLogRelRCReturnVoid(rc);
    -#ifdef IN_RC
    -    if (fFlushTLBs && rc != VINF_PGM_SYNC_CR3)
    -        PGM_INVL_VCPU_TLBS(VMMGetCpu0(pVM));
    -#else
        HMFlushTlbOnAllVCpus(pVM);
    -#endif

        /*
    …
        }

    -#ifndef IN_RC
        /*
         * Tell NEM about the protection change.
    …
            PGM_PAGE_SET_NEM_STATE(pPage, u2State);
        }
    -#endif
    }
    …
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);

    -#ifndef IN_RC
            /* Tell NEM about the protection change. */
            if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
    …
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
    -#else
    -        RT_NOREF_PV(fNemNotifiedAlready);
    -#endif
        }
        else
    …
             */
            pgmHandlerPhysicalResetRamFlags(pVM, pCur);
    -#if defined(VBOX_WITH_REM) || defined(IN_RING3) || defined(IN_RING0)
            PPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
                                    && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;
    -#endif

            /*
    …
                if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
                {
    -#if defined(VBOX_WITH_REM) || defined(IN_RING3) || defined(IN_RING0)
                    RTGCPHYS const cb = GCPhysLast - GCPhys + 1;
                    PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;
    -#endif
    #ifdef VBOX_WITH_REM
                    bool const fHasHCHandler = !!pCurType->pfnHandlerR3;
    …

                    /** @todo NEM: not sure we need this notification... */
    -#if defined(IN_RING3) || defined(IN_RING0)
                    NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);
    -#endif

                    pgmUnlock(pVM);
    …
                    PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
                    pCur->cTmpOffPages++;
    -#ifndef IN_RC
    +
                    /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
                    if (VM_IS_NEM_ENABLED(pVM))
    …
                        PGM_PAGE_SET_NEM_STATE(pPage, u2State);
                    }
    -#endif
                }
                pgmUnlock(pVM);
    …
            pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);

    -# ifndef IN_RC
            /* Tell NEM about the backing and protection change. */
            if (VM_IS_NEM_ENABLED(pVM))
    …
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
    -# endif
            LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));
            pgmUnlock(pVM);
    …
            pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);

    -# ifndef IN_RC
            /* Tell NEM about the backing and protection change. */
            if (VM_IS_NEM_ENABLED(pVM))
    …
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
    -# endif
            LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
            pgmUnlock(pVM);
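All of the hunks above follow one pattern: the "tell NEM about the protection change" blocks used to be compiled out of the raw-mode context via #ifndef IN_RC and now run in every remaining context (ring-3 and ring-0). A minimal, self-contained sketch of that pattern follows; notify_nem() and handler_reset_page() are invented names for illustration, not the real NEM/PGM entry points.

    /* Sketch only: the notification is now reached unconditionally because the
     * raw-mode build context no longer exists. */
    #include <stdio.h>

    #ifdef IN_RC
    # error "raw-mode context is no longer a supported build context"
    #endif

    static void notify_nem(unsigned page, unsigned prot)
    {
        printf("NEM: page %u protection now %#x\n", page, prot);
    }

    static void handler_reset_page(unsigned page)
    {
        /* ...reset handler state for the page... */
        notify_nem(page, 0x3); /* no longer wrapped in #ifndef IN_RC */
    }

    int main(void)
    {
        handler_reset_page(42);
        return 0;
    }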
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
(r80118 → r80172)

        /* This only applies to raw mode where we only support 1 VCPU. */
        PVMCPU pVCpu = VMMGetCpu0(pVM);
    -# ifdef IN_RC
    -    Assert(pShwPageCR3 != pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
    -# endif
    +# error fixme

        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
(r80163 → r80172)

        (   (a_rcStrict) == VINF_SUCCESS \
         || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
    -#elif defined(IN_RING0) || defined(IN_RC)
    +#elif defined(IN_RING0)
    #define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
        (   (a_rcStrict) == VINF_SUCCESS \
    …
    # define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
        (false /* no virtual handlers in ring-0! */ )
    -#elif defined(IN_RC)
    -# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
    -    (   (a_rcStrict) == VINF_SUCCESS \
    -     || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
    -     \
    -     || ((a_fWrite) ? (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT : 0) \
    -     || ((a_fWrite) ? (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT : 0) \
    -     || ((a_fWrite) ? (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT : 0) \
    -     || ((a_fWrite) ? (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT : 0) \
    -     || ((a_fWrite) ? (a_rcStrict) == VINF_SELM_SYNC_GDT : 0) \
    -     || ((a_fWrite) ? (a_rcStrict) == VINF_CSAM_PENDING_ACTION : 0) \
    -     || (a_rcStrict) == VINF_PATM_CHECK_PATCH_PAGE \
    -     \
    -     || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
    -     || (a_rcStrict) == VINF_EM_DBG_STOP \
    -     || (a_rcStrict) == VINF_EM_DBG_EVENT \
    -     || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
    -    )
    #else
    # error "Context?"
    …
        PGM_INVL_ALL_VCPU_TLBS(pVM);

    -#ifndef IN_RC
        /*
         * Notify NEM about the mapping change for this page.
    …
            }
        }
    -#endif

        return rc;
    …
        pVM->pgm.s.cWrittenToPages++;

    -#ifndef IN_RC
        /*
         * Notify NEM about the protection change so we won't spin forever.
    …
            PGM_PAGE_SET_NEM_STATE(pPage, u2State);
        }
    -#else
    -    RT_NOREF(GCPhys);
    -#endif
    }
    …
        AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);

    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        /*
         * Map it by HCPhys.
    …
        NOREF(GCPhys);

    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        /*
         * Just some sketchy GC/R0-darwin code.
    …
        return VINF_SUCCESS;

    -#else /* IN_RING3 || IN_RING0 */
    +#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */

    …
        else
        {
    # ifdef IN_RING0
            int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
            AssertRCReturn(rc, rc);
            pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
            Assert(pMap);
    # else
            int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
            if (RT_FAILURE(rc))
                return rc;
    # endif
            AssertPtr(pMap->pv);
        }
    …
        *ppMap = pMap;
        return VINF_SUCCESS;
    -#endif /* IN_RING3*/
    +#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    }
    …
        }

    -#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

    /**
    …
            pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
        }
    # ifdef PGM_WITH_PHYS_TLB
        if (   PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
            || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
    …
        else
            pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
    # else
        pTlbe->GCPhys = NIL_RTGCPHYS;
    # endif
        pTlbe->pPage = pPage;
        return VINF_SUCCESS;
    }

    -#endif /* ! IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    +#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */

    /**
    …
         * Get the mapping address.
         */
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        void *pv;
        rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
    …
    }

    -#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

    /**
    …
    }

    -#endif /* ! IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    +#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */


    …
         * Do the job.
         */
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        void *pv;
        PVMCPU pVCpu = VMMGetCpu(pVM);
    …
         * Do the job.
         */
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        void *pv;
        PVMCPU pVCpu = VMMGetCpu(pVM);
    …
        AssertRCReturn(rc, rc);

    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        /*
         * Find the page and make sure it's writable.
    …
        }

    -#else /* IN_RING3 || IN_RING0 */
    +#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
        /*
         * Query the Physical TLB entry for the page (may fail).
    …
        }

    -#endif /* IN_RING3 || IN_RING0 */
    +#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
        pgmUnlock(pVM);
        return rc;
    …
        AssertRCReturn(rc, rc);

    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        /*
         * Find the page and make sure it's readable.
    …
        }

    -#else /* IN_RING3 || IN_RING0 */
    +#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
        /*
         * Query the Physical TLB entry for the page (may fail).
    …
        }

    -#endif /* IN_RING3 || IN_RING0 */
    +#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
        pgmUnlock(pVM);
        return rc;
    …
    VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
    {
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        Assert(pLock->pvPage != NULL);
        Assert(pLock->pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
    …
        }
        pgmUnlock(pVM);
    -#endif /* IN_RING3*/
    +#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    }
    …

        Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        NOREF(pVM); NOREF(pR3Ptr); RT_NOREF_PV(GCPhys);
        AssertFailedReturn(VERR_NOT_IMPLEMENTED);
    …
    }

    -#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
    +#if 0 /*def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */

    /**
    …
     */
    VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
                                                 R3PTRTYPE(uint8_t *) *ppb,
    #else
    …
                break;
            }
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
            *ppb = NULL;
    …
                break;
            }
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
            *ppb = NULL;
    …
        }

    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        void *pv;
        rc = pgmRZDynMapHCPageInlined(pVCpu,
    …
    }

    -#ifndef IN_RC
    -
    /**
    …
    }

    -#endif /* !IN_RC */
    -
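The recurring edit in this file shrinks the guard "#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)" to "#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0": the dynamic-mapping path is now selected by that single define, and every other build goes through the physical TLB / chunk-mapping path. A compilable sketch of the two-way selection follows; map_page() is a stand-in for the real PGM mapping routines, not their actual signature.

    /* Sketch only: demonstrates the guard structure, not the PGM internals. */
    #include <stdint.h>
    #include <stdio.h>

    #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    /* Ring-0 builds with the split 4GB address space map guest pages on demand. */
    static void *map_page(uint64_t hc_phys)
    {
        printf("dynamic ring-0 mapping of %#llx\n", (unsigned long long)hc_phys);
        return NULL;
    }
    #else
    /* All remaining builds use the per-VM physical TLB / chunk mapping. */
    static void *map_page(uint64_t hc_phys)
    {
        printf("physical TLB / chunk mapping of %#llx\n", (unsigned long long)hc_phys);
        return NULL;
    }
    #endif

    int main(void)
    {
        (void)map_page(0x1000);
        return 0;
    }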
trunk/src/VBox/VMM/include/PGMInline.h
(r80163 → r80172)

    #include <VBox/vmm/gmm.h>
    #include <VBox/vmm/hm.h>
    -#ifndef IN_RC
    -# include <VBox/vmm/nem.h>
    -#endif
    +#include <VBox/vmm/nem.h>
    #include <iprt/asm.h>
    #include <iprt/assert.h>
    …
    }

    -#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

    /**
    …
    }

    -#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)

    /**
    …
    }

    -#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
    +#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    -#ifndef IN_RC

    /**
    …
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
            rc = VINF_SUCCESS;
    #if 0 //def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    # ifdef IN_RING3
            if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR0)
    # else
            if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR3)
    # endif
                pTlbe->pv = pVM->pgm.s.CTX_SUFF(pvZeroPg);
    #endif
            AssertPtr(pTlbe->pv);
    #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
    #endif
        }
    …
    }

    -#endif /* !IN_RC */

    /**
    …
        }

    -#ifndef IN_RC
        /* Tell NEM. */
        if (VM_IS_NEM_ENABLED(pVM))
    …
            PGM_PAGE_SET_NEM_STATE(pPage, u2State);
        }
    -#endif
    }
    …
    }

    -#ifndef IN_RC

    /**
    …
    }

    -#endif /* !IN_RC */

    /**
    …
    }

    -#ifndef IN_RC

    /**
    …
    }

    -#endif /* !IN_RC */

    /**
    …
         * Just deal with the simple case here.
         */
    #ifdef VBOX_STRICT
        PVM pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
    #endif
    #ifdef LOG_ENABLED
        const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
    #endif
        const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
        if (cRefs == 1)
trunk/src/VBox/VMM/include/PGMInternal.h
(r80167 → r80172)

     */
    //#if 0 /* disabled again while debugging */
    -#ifndef IN_RC
    -# define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    -#endif
    +#define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    //#endif

    …
     * Large page support enabled only on 64 bits hosts; applies to nested paging only.
     */
    -#if (HC_ARCH_BITS == 64) && !defined(IN_RC)
    -# define PGM_WITH_LARGE_PAGES
    -#endif
    +#define PGM_WITH_LARGE_PAGES

    /**
    …
     * VMX_EXIT_EPT_MISCONFIG.
     */
    -#if 1 /* testing */
    -# define PGM_WITH_MMIO_OPTIMIZATIONS
    -#endif
    +#define PGM_WITH_MMIO_OPTIMIZATIONS

    /**
    …
     * but ~5% fewer faults.
     */
    -# define PGM_SYNC_NR_PAGES              …
    +# define PGM_SYNC_NR_PAGES              32
    #else
    -# define PGM_SYNC_NR_PAGES              …
    +# define PGM_SYNC_NR_PAGES              8
    #endif

    …
     * @remark There is no need to assert on the result.
     */
    -#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    # define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
         pgmRZDynMapHCPageInlined(pVCpu, HCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
    …
     * @remark There is no need to assert on the result.
     */
    -#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    # define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
         pgmRZDynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
    …
     * @remark There is no need to assert on the result.
     */
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    # define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
         pgmRZDynMapGCPageOffInlined(VMMGetCpu(pVM), GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
    …
     * @param pvPage The pool page.
     */
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    # ifdef LOG_ENABLED
    #  define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage) pgmRZDynMapUnusedHint(pVCpu, pvPage, RT_SRC_POS)
    …
     * @param GCVirt The virtual address of the page to invalidate.
     */
    -#ifdef IN_RC
    -# define PGM_INVL_PG(pVCpu, GCVirt)             ASMInvalidatePage((uintptr_t)(GCVirt))
    -#elif defined(IN_RING0)
    +#ifdef IN_RING0
    # define PGM_INVL_PG(pVCpu, GCVirt)             HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
    +#elif defined(IN_RING3)
    +# define PGM_INVL_PG(pVCpu, GCVirt)             HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
    #else
    -# define PGM_INVL_PG(pVCpu, GCVirt)             HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
    +# error "Not IN_RING0 or IN_RING3!"
    #endif
    …
     * @param GCVirt The virtual address of the page to invalidate.
     */
    -#ifdef IN_RC
    -# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      ASMInvalidatePage((uintptr_t)(GCVirt))
    -#elif defined(IN_RING0)
    +#ifdef IN_RING0
    # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
    #else
    …
     * @param GCVirt The virtual address within the page directory to invalidate.
     */
    -#ifdef IN_RC
    -# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         ASMReloadCR3()
    -#elif defined(IN_RING0)
    +#ifdef IN_RING0
    # define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HMFlushTlb(pVCpu)
    #else
    …
     * @param pVCpu The cross context virtual CPU structure.
     */
    -#ifdef IN_RC
    -# define PGM_INVL_VCPU_TLBS(pVCpu)              ASMReloadCR3()
    -#elif defined(IN_RING0)
    +#ifdef IN_RING0
    # define PGM_INVL_VCPU_TLBS(pVCpu)              HMFlushTlb(pVCpu)
    #else
    …
     * @param pVM The cross context VM structure.
     */
    -#ifdef IN_RC
    -# define PGM_INVL_ALL_VCPU_TLBS(pVM)            ASMReloadCR3()
    -#elif defined(IN_RING0)
    +#ifdef IN_RING0
    # define PGM_INVL_ALL_VCPU_TLBS(pVM)            HMFlushTlbOnAllVCpus(pVM)
    #else
    …
    {
        /** Pointer to the page. */
    -#ifndef IN_RC
        RTR0PTR                         pvPage;
    -#else
    -    RTRCPTR                         pvPage;
    -# if HC_ARCH_BITS == 64
    -    uint32_t                        u32Alignment2;
    -# endif
    -#endif
        /** The mapping cache index. */
        uint16_t                        iPage;
    …
    /** @typedef PPPGMPAGEMAP
     * Pointer to a page mapper unit pointer for current context. */
    -#if defined(IN_RC) && !defined(DOXYGEN_RUNNING)
    -// typedef PPGMPAGEGCMAPTLB       PPGMPAGEMAPTLB;
    -// typedef PPGMPAGEGCMAPTLBE      PPGMPAGEMAPTLBE;
    -// typedef PPGMPAGEGCMAPTLBE     *PPPGMPAGEMAPTLBE;
    -# define PGM_PAGEMAPTLB_ENTRIES   PGM_PAGEGCMAPTLB_ENTRIES
    -# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGEGCMAPTLB_IDX(GCPhys)
    -typedef void *                    PPGMPAGEMAP;
    -typedef void **                   PPPGMPAGEMAP;
    -//#elif IN_RING0
    +#if defined(IN_RING0) && 0
    // typedef PPGMPAGER0MAPTLB       PPGMPAGEMAPTLB;
    // typedef PPGMPAGER0MAPTLBE      PPGMPAGEMAPTLBE;
    …
     * @remark There is no need to assert on the result.
     */
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    # define PGMPOOL_PAGE_2_PTR(a_pVM, a_pPage)     pgmPoolMapPageInlined((a_pVM), (a_pPage) RTLOG_COMMA_SRC_POS)
    #elif defined(VBOX_STRICT) || 1 /* temporarily going strict here */
    …
     * @remark There is no need to assert on the result.
     */
    -#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    # define PGMPOOL_PAGE_2_PTR_V2(a_pVM, a_pVCpu, a_pPage)     pgmPoolMapPageV2Inlined((a_pVM), (a_pVCpu), (a_pPage) RTLOG_COMMA_SRC_POS)
    #else
    …
     * @{
     */
    -#ifdef IN_RC
    -# define PGM_CTX(a,b)                   a##RC##b
    -# define PGM_CTX_STR(a,b)               a "GC" b
    -# define PGM_CTX_DECL(type)             VMMRCDECL(type)
    +#ifdef IN_RING3
    +# define PGM_CTX(a,b)                   a##R3##b
    +# define PGM_CTX_STR(a,b)               a "R3" b
    +# define PGM_CTX_DECL(type)             DECLCALLBACK(type)
    +#elif defined(IN_RING0)
    +# define PGM_CTX(a,b)                   a##R0##b
    +# define PGM_CTX_STR(a,b)               a "R0" b
    +# define PGM_CTX_DECL(type)             VMMDECL(type)
    #else
    -# ifdef IN_RING3
    -#  define PGM_CTX(a,b)                  a##R3##b
    -#  define PGM_CTX_STR(a,b)              a "R3" b
    -#  define PGM_CTX_DECL(type)            DECLCALLBACK(type)
    -# else
    -#  define PGM_CTX(a,b)                  a##R0##b
    -#  define PGM_CTX_STR(a,b)              a "R0" b
    -#  define PGM_CTX_DECL(type)            VMMDECL(type)
    -# endif
    +# error "Not IN_RING3 or IN_RING0!"
    #endif
    …

    /** The length of g_aPgmGuestModeData. */
    -#if defined(VBOX_WITH_64_BITS_GUESTS) && !defined(IN_RC)
    +#ifdef VBOX_WITH_64_BITS_GUESTS
    # define PGM_GUEST_MODE_DATA_ARRAY_SIZE     (PGM_TYPE_AMD64 + 1)
    #else
    …

    /** The length of g_aPgmShadowModeData. */
    -#ifndef IN_RC
    -# define PGM_SHADOW_MODE_DATA_ARRAY_SIZE    PGM_TYPE_END
    -#else
    -# define PGM_SHADOW_MODE_DATA_ARRAY_SIZE    (PGM_TYPE_PAE + 1)
    -#endif
    +#define PGM_SHADOW_MODE_DATA_ARRAY_SIZE     PGM_TYPE_END
    /** The shadow mode data array. */
    extern PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE];
    …

    /** The length of g_aPgmBothModeData. */
    -#ifndef IN_RC
    -# define PGM_BOTH_MODE_DATA_ARRAY_SIZE      ((PGM_TYPE_END - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END)
    -#else
    -# define PGM_BOTH_MODE_DATA_ARRAY_SIZE      ((PGM_TYPE_PAE + 1 - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END)
    -#endif
    +#define PGM_BOTH_MODE_DATA_ARRAY_SIZE       ((PGM_TYPE_END - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END)
    /** The guest+shadow mode data array. */
    extern PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE];
    …
        uint32_t                        uPadding0; /**< structure size alignment. */

    -#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAW_MODE)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        /** Automatically tracked physical memory mapping set.
         * Ring-0 and strict raw-mode builds. */
    …
    bool            pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
    void            pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, bool fDoAccounting);
    -#ifdef VBOX_WITH_RAW_MODE
    -PPGMVIRTHANDLER pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, unsigned *piPage);
    -DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
    -# if defined(VBOX_STRICT) || defined(LOG_ENABLED)
    -void            pgmHandlerVirtualDumpPhysPages(PVM pVM);
    -# else
    -#  define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
    -# endif
    -#endif /* VBOX_WITH_RAW_MODE */
    DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    int             pgmR3InitSavedState(PVM pVM, uint64_t cbRam);
    …

    #endif /* IN_RING3 */
    -#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    +#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    int             pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
    int             pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
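After this change the PGM context-dispatch macros cover only ring-3 and ring-0; any other build context hits an #error. The sketch below reproduces that dispatch with invented demoR3Work()/demoR0Work() functions to show how PGM_CTX() pastes the context suffix into a symbol name; it assumes a ring-3 build when neither context define is set.

    /* Sketch only: same token-pasting pattern as PGM_CTX, applied to invented
     * demo functions rather than real PGM symbols. */
    #include <stdio.h>

    #if !defined(IN_RING0) && !defined(IN_RING3)
    # define IN_RING3 /* assume ring-3 for this sketch */
    #endif

    #ifdef IN_RING3
    # define PGM_CTX(a,b)  a##R3##b
    #elif defined(IN_RING0)
    # define PGM_CTX(a,b)  a##R0##b
    #else
    # error "Not IN_RING3 or IN_RING0!"
    #endif

    static void demoR3Work(void) { printf("ring-3 flavour\n"); }
    static void demoR0Work(void) { printf("ring-0 flavour\n"); }

    int main(void)
    {
        PGM_CTX(demo,Work)();   /* expands to demoR3Work() in this ring-3 sketch */
        (void)&demoR0Work;      /* keep the unused flavour referenced */
        return 0;
    }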