Changeset 107194 in vbox

- Timestamp: Nov 29, 2024, 2:47:06 PM (5 months ago)
- svn:sync-xref-src-repo-rev: 166196
- Location: trunk/src/VBox/VMM
- Files: 23 edited
trunk/src/VBox/VMM/Makefile.kmk
r107113 → r107194

 	VMMR3/EM.cpp \
 	VMMR3/EMR3Dbg.cpp \
-	VMMR3/EMHM.cpp \
+	$(if-expr defined(VBOX_WITH_HWVIRT),VMMR3/EMHM.cpp,) \
 	VMMR3/EMR3Nem.cpp \
 	VMMR3/GCM.cpp \
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r107179 → r107194

 }
 
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
 
 /**
…
     * Do we need to do anything special?
     */
-#ifdef IN_RING3
+# ifdef IN_RING3
    if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
-#else
+# else
    if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
-#endif
+# endif
    {
        /*
         * Allocate pages only if we're out of them, or in ring-3, almost out.
         */
-#ifdef IN_RING3
+# ifdef IN_RING3
        if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
-#else
+# else
        if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
-#endif
+# endif
        {
            Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
                 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
-#ifdef IN_RING3
+# ifdef IN_RING3
            int rc = PGMR3PhysAllocateHandyPages(pVM);
-#else
+# else
            int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
-#endif
+# endif
            if (RT_UNLIKELY(rc != VINF_SUCCESS))
            {
…
                Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
                Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
-#ifndef IN_RING3
+# ifndef IN_RING3
                VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
-#endif
+# endif
            }
            AssertMsgReturn(    pVM->pgm.s.cHandyPages > 0
…
        if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
            VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
-#ifndef IN_RING3
+# ifndef IN_RING3
        if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
        {
…
            VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
        }
-#endif
+# endif
    }
 }
…
 }
 
-#ifdef PGM_WITH_LARGE_PAGES
+# ifdef PGM_WITH_LARGE_PAGES
 
 /**
…
     * Do the allocation.
     */
-# ifdef IN_RING3
+#  ifdef IN_RING3
    rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
-# elif defined(IN_RING0)
+#  elif defined(IN_RING0)
    rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
-# else
-#  error "Port me"
-# endif
+#  else
+#   error "Port me"
+#  endif
    if (RT_SUCCESS(rc))
    {
…
 }
 
-#endif /* PGM_WITH_LARGE_PAGES */
+# endif /* PGM_WITH_LARGE_PAGES */
+#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
+
 
 
…
            RT_FALL_THRU();
        case PGM_PAGE_STATE_SHARED:
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
            return pgmPhysAllocPage(pVM, pPage, GCPhys);
+#else
+           AssertFailed(); /** @todo not sure if we make use of ZERO pages or not in NEM-mode, but I can't see how pgmPhysAllocPage would work. */
+           return VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE;
+#endif
 
        /* Not allowed to write to ballooned pages. */
…
 }
 
+#if 0 /* unused */
 /**
  * Internal usage: Map the page specified by its GMM ID.
…
 #endif
 }
+#endif /* unused */
 
 /**
…
        AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
    }
-   Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
+   Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0 || PGM_IS_IN_NEM_MODE(pVM));
 
    /*
…
        AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
    }
-   Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
+   Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0 || PGM_IS_IN_NEM_MODE(pVM));
 
    /*
…
    AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
    PGM_LOCK_ASSERT_OWNER(pVM);
-   Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
+   Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0 || PGM_IS_IN_NEM_MODE(pVM));
 
    /*
…
        AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
    }
-   Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
+   Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0 || PGM_IS_IN_NEM_MODE(pVM));
 
    /*
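The new VBOX_WITH_ONLY_PGM_NEM_MODE guard compiles the GMM-backed page allocator out of builds where the native execution API (NEM) owns all guest memory, so the shared/zero-page write path degrades to an error instead. A minimal compile-clean sketch of that split; the helper and error value are hypothetical stand-ins (the real code uses pgmPhysAllocPage() and VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE):

    #define MY_ERR_NEM_MODE  (-1)                     /* stand-in status code */
    extern int myAllocPageFromGmm(void *pVM, void *pPage); /* hypothetical helper */

    static int mySharedPageWriteFault(void *pVM, void *pPage)
    {
    #ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
        /* Classic path: replace the shared/zero page with a private copy
         * allocated from the global memory manager (GMM). */
        return myAllocPageFromGmm(pVM, pPage);
    #else
        /* NEM-only builds: the host API manages guest RAM, so there is no
         * GMM allocator to fall back on. */
        (void)pVM; (void)pPage;
        return MY_ERR_NEM_MODE;
    #endif
    }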
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
r106920 → r107194

      difference between GIP and system time matters on systems with high resolution
      system time. So, convert the input from GIP to System time in that case. */
-   Assert(ASMGetFlags() & X86_EFL_IF);
+   Assert(ASMIntAreEnabled());
    const uint64_t u64NowSys = RTTimeSystemNanoTS();
    const uint64_t u64NowGip = RTTimeNanoTS();
…
     * While we're here, do a round of scheduling.
     */
-   Assert(ASMGetFlags() & X86_EFL_IF);
+   Assert(ASMIntAreEnabled());
    const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
    pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
…
    pGVM->gvmm.s.StatsSched.cPollCalls++;
 
-   Assert(ASMGetFlags() & X86_EFL_IF);
+   Assert(ASMIntAreEnabled());
    const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
trunk/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp
r107110 → r107194

        && !pUVM->dbgf.s.paBpLocL1R3)
    {
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (!SUPR3IsDriverless())
        {
…
        }
        else
+#endif
        {
            /* Driverless: Do dbgfR0BpInitWorker here, ring-3 style. */
…
        && !pUVM->dbgf.s.paBpLocPortIoR3)
    {
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (!SUPR3IsDriverless())
        {
…
        }
        else
+#endif
        {
            /* Driverless: Do dbgfR0BpPortIoInitWorker here, ring-3 style. */
…
    if (pUVM->dbgf.s.pbmBpOwnersAllocR3)
    {
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (!SUPR3IsDriverless())
        {
…
        }
        else
+#endif
        {
            /* Driverless: Do dbgfR0BpOwnerInitWorker here, ring-3 style. */
…
    if (RT_LIKELY(pbmAlloc))
    {
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (!SUPR3IsDriverless())
        {
…
        }
        else
+#endif
        {
            /* Driverless: Do dbgfR0BpChunkAllocWorker here, ring-3 style. */
…
    if (RT_LIKELY(pbmAlloc))
    {
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (!SUPR3IsDriverless())
        {
…
        }
        else
+#endif
        {
            /* Driverless: Do dbgfR0BpL2TblChunkAllocWorker here, ring-3 style. */
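The same guard pattern repeats in every hunk above: when ring-0 modules are configured in, the ring-0 worker is preferred unless the support driver is absent; in minimal/driverless builds only the ring-3 fallback is compiled at all. A compile-clean sketch of the shape (the worker functions and the driverless query are illustrative stand-ins, not VirtualBox APIs):

    #include <stdbool.h>

    extern int  myR0InitWorker(void *pUVM);    /* hypothetical ring-0 request path */
    extern int  myR3InitFallback(void *pUVM);  /* hypothetical driverless fallback  */
    extern bool mySupIsDriverless(void);       /* stands in for SUPR3IsDriverless() */

    static int myInit(void *pUVM)
    {
    #if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (!mySupIsDriverless())
        {
            /* Let the ring-0 part allocate and share the tables. */
            return myR0InitWorker(pUVM);
        }
        else
    #endif
        {
            /* Driverless or no ring-0 modules: same work, ring-3 style. */
            return myR3InitFallback(pUVM);
        }
    }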
trunk/src/VBox/VMM/VMMR3/DBGFR3Tracer.cpp
r106061 → r107194

 {
    PDBGFTRACERINSR3 pThis = NULL;
+   RT_NOREF(fR0Enabled);
 
    /*
     * Allocate the tracer instance.
     */
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if ((fR0Enabled /*|| fRCEnabled*/) && !SUPR3IsDriverless())
    {
…
    }
    else
+#endif
    {
        /* The code in this else branch works by the same rules as the DBGFR0Tracer.cpp
trunk/src/VBox/VMM/VMMR3/EM.cpp
r107113 → r107194

        || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
        AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
-#if !defined(VBOX_VMM_TARGET_ARMV8)
+#ifdef VBOX_WITH_HWVIRT
    else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
        rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
+#endif
+#if !defined(VBOX_VMM_TARGET_ARMV8)
    else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
        rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
…
    /*
-    * Execute everything in IEM?
+    * Can we use the default engine. IEM is the fallback.
     */
-   if (   pVM->em.s.fIemExecutesAll
-       || VM_IS_EXEC_ENGINE_IEM(pVM))
-#ifdef VBOX_WITH_IEM_RECOMPILER
-       return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
-#else
-       return EMSTATE_IEM;
-#endif
-
-#if !defined(VBOX_VMM_TARGET_ARMV8)
-   if (VM_IS_HM_ENABLED(pVM))
-   {
-       if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
-           return EMSTATE_HM;
-   }
-   else
-#endif
-   if (NEMR3CanExecuteGuest(pVM, pVCpu))
-       return EMSTATE_NEM;
-
-   /*
-    * Note! Raw mode and hw accelerated mode are incompatible. The latter
-    *       turns off monitoring features essential for raw mode!
-    */
+   if (!pVM->em.s.fIemExecutesAll)
+   {
+       switch (pVM->bMainExecutionEngine)
+       {
+#ifdef VBOX_WITH_HWVIRT
+           case VM_EXEC_ENGINE_HW_VIRT:
+               if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
+                   return EMSTATE_HM;
+               break;
+#endif
+#ifdef VBOX_WITH_NATIVE_NEM
+           case VM_EXEC_ENGINE_NATIVE_API:
+               if (NEMR3CanExecuteGuest(pVM, pVCpu))
+                   return EMSTATE_NEM;
+               break;
+#endif
+           case VM_EXEC_ENGINE_IEM:
+               break;
+           default:
+               AssertMsgFailed(("bMainExecutionEngine=%d\n", pVM->bMainExecutionEngine));
+               break;
+       }
+   }
 #ifdef VBOX_WITH_IEM_RECOMPILER
    return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
…
    }
 
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    /*
     * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
…
        return rc;
    }
+#endif
 
    /* check that we got them all */
…
 #endif /* VBOX_VMM_TARGET_ARMV8 */
 
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    /*
     * Allocate handy pages.
…
        UPDATE_RC();
    }
+#endif
 
    /*
…
    if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
    {
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
        rc2 = PGMR3PhysAllocateHandyPages(pVM);
+#else
+       rc2 = VINF_EM_NO_MEMORY;
+#endif
        UPDATE_RC();
        if (rc == VINF_EM_NO_MEMORY)
…
         */
        case EMSTATE_HM:
-#if defined(VBOX_VMM_TARGET_ARMV8)
-           AssertReleaseFailed(); /* Should never get here. */
+#ifdef VBOX_WITH_HWVIRT
+           rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
 #else
-           rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
+           AssertReleaseFailedStmt(rc = VERR_EM_INTERNAL_ERROR); /* Should never get here. */
 #endif
            break;
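The reworked scheduling logic in EM.cpp dispatches on the configured main execution engine and treats IEM as the universal fallback. Restated as a self-contained toy, with simplified enum and helper names standing in for the VBox ones:

    #include <stdbool.h>

    enum Engine { ENGINE_IEM, ENGINE_HW_VIRT, ENGINE_NATIVE_API };
    enum State  { STATE_IEM, STATE_HM, STATE_NEM };

    /* Whether the chosen engine can run the guest in its current state. */
    extern bool engineCanRun(enum Engine enmEngine);

    static enum State pickExecState(enum Engine enmEngine, bool fIemExecutesAll)
    {
        if (!fIemExecutesAll)
            switch (enmEngine)
            {
                case ENGINE_HW_VIRT:    /* VT-x / AMD-V */
                    if (engineCanRun(enmEngine))
                        return STATE_HM;
                    break;
                case ENGINE_NATIVE_API: /* Hyper-V, HVF, KVM, ... */
                    if (engineCanRun(enmEngine))
                        return STATE_NEM;
                    break;
                case ENGINE_IEM:
                    break;
            }
        return STATE_IEM;               /* interpreter/recompiler fallback */
    }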
trunk/src/VBox/VMM/VMMR3/EMHM.cpp
r106061 → r107194

 #include "EMInline.h"
 
+#ifndef VBOX_WITH_HWVIRT
+# error "VBOX_WITH_HWVIRT misconfig!"
+#endif
+
 
 /*********************************************************************************************************************************
trunk/src/VBox/VMM/VMMR3/EMR3Nem.cpp
r106061 → r107194

    }
 
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    /*
     * Allocate handy pages (just in case the above actions have consumed some pages).
…
        return rc;
    }
+#endif
 
    /*
trunk/src/VBox/VMM/VMMR3/IOM.cpp
r106061 → r107194

    if (enmWhat == VMINITCOMPLETED_RING0)
    {
+# if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        /*
         * Synchronize the ring-3 I/O port and MMIO statistics indices into the
…
            AssertLogRelRCReturn(rc, rc);
        }
+# endif
 
        /*
trunk/src/VBox/VMM/VMMR3/IOMR3IoPort.cpp
r106061 → r107194

    int rc;
+# if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!SUPR3IsDriverless())
    {
…
    }
    else
+# endif
    {
        /*
…
    int rc;
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!SUPR3IsDriverless())
    {
…
    }
    else
+#endif
    {
        /*
trunk/src/VBox/VMM/VMMR3/IOMR3Mmio.cpp
r106061 → r107194

    int rc;
+# if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!SUPR3IsDriverless())
    {
…
    }
    else
+# endif
    {
        /*
…
    int rc;
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!SUPR3IsDriverless())
    {
…
    }
    else
+#endif
    {
        /*
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp
r107113 → r107194

    }
 
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!SUPR3IsDriverless())
    {
…
                       "/NEM/R0Stats/cPagesInUse");
    }
+#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
 }
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
r107113 → r107194

    }
 
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!SUPR3IsDriverless())
    {
…
                       "/NEM/R0Stats/cPagesInUse");
    }
+#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
 
 }
trunk/src/VBox/VMM/VMMR3/PDMDevice.cpp
r107113 → r107194

    }
 
-   /* RZEnabled, R0Enabled, RCEnabled*/
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0) /** @todo not entirely correct for new-RC; */
+   /* R0Enabled, RCEnabled*/
    bool fR0Enabled = false;
    bool fRCEnabled = false;
    if (   (pReg->fFlags & (PDM_DEVREG_FLAGS_R0 | PDM_DEVREG_FLAGS_RC))
-#ifdef VBOX_WITH_PGM_NEM_MODE
+# ifdef VBOX_WITH_PGM_NEM_MODE
        && !PGMR3IsNemModeEnabled(pVM) /* No ring-0 in simplified memory mode. */
-#endif
+# endif
        && !SUPR3IsDriverless())
    {
…
        }
    }
+#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
 
 #ifdef VBOX_WITH_DBGF_TRACING
…
        PPDMDEVINS   pDevIns;
        PPDMCRITSECT pCritSect;
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (fR0Enabled || fRCEnabled)
        {
…
            Req.afReserved[1] = false;
            Req.afReserved[2] = false;
-#ifdef VBOX_WITH_DBGF_TRACING
+# ifdef VBOX_WITH_DBGF_TRACING
            Req.hDbgfTracerEvtSrc = hDbgfTraceEvtSrc;
-#else
+# else
            Req.hDbgfTracerEvtSrc = NIL_DBGFTRACEREVTSRC;
-#endif
+# endif
            rc = RTStrCopy(Req.szDevName, sizeof(Req.szDevName), pReg->szName);
            AssertLogRelRCReturn(rc, rc);
…
        }
        else
+#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
        {
            /* The code in this else branch works by the same rules as the PDMR0Device.cpp
…
        }
 
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        /*
         * Call the ring-0 constructor if applicable.
…
            }
        }
+#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
 
    } /* for device instances */
trunk/src/VBox/VMM/VMMR3/PDMQueue.cpp
r106061 → r107194

                 VERR_OUT_OF_RANGE);
    AssertReturn(!fRZEnabled || enmType == PDMQUEUETYPE_INTERNAL || enmType == PDMQUEUETYPE_DEV, VERR_INVALID_PARAMETER);
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (SUPR3IsDriverless())
+#endif
        fRZEnabled = false;
…
    PPDMQUEUE      pQueue;
    PDMQUEUEHANDLE hQueue;
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (fRZEnabled)
    {
…
    }
    else
+#endif
    {
        /* Do it here using the paged heap: */
trunk/src/VBox/VMM/VMMR3/PGM-armv8.cpp
r107171 → r107194

    PCFGMNODE const pCfgPGM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM");
 
+   /** @todo RamPreAlloc doesn't work for NEM-mode. */
    int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RamPreAlloc", &pVM->pgm.s.fRamPreAlloc,
 #ifdef VBOX_WITH_PREALLOC_RAM_BY_DEFAULT
…
    AssertLogRelRCReturn(rc, rc);
 
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, UINT32_MAX);
    AssertLogRelRCReturn(rc, rc);
    for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
+#endif
 
    /*
…
    AssertRCReturn(rc, rc);
 
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    pgmR3PhysChunkInvalidateTLB(pVM, false /*fInRendezvous*/); /* includes pgmPhysInvalidatePageMapTLB call */
+#endif
 
    /*
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r107176 → r107194

    for (size_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aPhysHandlerTypes); i++)
    {
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (fDriverless)
+#endif
            pVM->pgm.s.aPhysHandlerTypes[i].hType   = i | (RTRandU64() & ~(uint64_t)PGMPHYSHANDLERTYPE_IDX_MASK);
        pVM->pgm.s.aPhysHandlerTypes[i].enmKind = PGMPHYSHANDLERKIND_INVALID;
…
    AssertLogRelRCReturn(rc, rc);
 
-#if HC_ARCH_BITS == 32
-# ifdef RT_OS_DARWIN
-   rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, _1G / GMM_CHUNK_SIZE * 3);
-# else
-   rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, _1G / GMM_CHUNK_SIZE);
-# endif
-#else
-   rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, UINT32_MAX);
-#endif
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
+# if HC_ARCH_BITS == 32
+#  ifdef RT_OS_DARWIN
+   rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, _1G / GMM_CHUNK_SIZE * 3);
+#  else
+   rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, _1G / GMM_CHUNK_SIZE);
+#  endif
+# else
+   rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, UINT32_MAX);
+# endif
    AssertLogRelRCReturn(rc, rc);
    for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
+#endif
 
    /*
…
    AssertRCReturn(rc, rc);
 
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    pgmR3PhysChunkInvalidateTLB(pVM, false /*fInRendezvous*/); /* includes pgmPhysInvalidatePageMapTLB call */
+#endif
 
    /*
…
    pVM->pgm.s.HCPhysInvMmioPg = pVM->pgm.s.HCPhysMmioPg;
    Log(("HCPhysInvMmioPg=%RHp abMmioPg=%p\n", pVM->pgm.s.HCPhysMmioPg, pVM->pgm.s.abMmioPg));
-#endif /* VBOX_WITH_ONLY_PGM_NEM_MODE */
+#endif
 
 
…
    STAM_REL_REG(pVM, &pPGM->cLargePages,         STAMTYPE_U32, "/PGM/Page/cLargePages",         STAMUNIT_COUNT, "The number of large pages allocated (includes disabled).");
    STAM_REL_REG(pVM, &pPGM->cLargePagesDisabled, STAMTYPE_U32, "/PGM/Page/cLargePagesDisabled", STAMUNIT_COUNT, "The number of disabled large pages.");
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    STAM_REL_REG(pVM, &pPGM->ChunkR3Map.c,        STAMTYPE_U32, "/PGM/ChunkR3Map/c",             STAMUNIT_COUNT, "Number of mapped chunks.");
    STAM_REL_REG(pVM, &pPGM->ChunkR3Map.cMax,     STAMTYPE_U32, "/PGM/ChunkR3Map/cMax",          STAMUNIT_COUNT, "Maximum number of mapped chunks.");
    STAM_REL_REG(pVM, &pPGM->cMappedChunks,       STAMTYPE_U32, "/PGM/ChunkR3Map/Mapped",        STAMUNIT_COUNT, "Number of times we mapped a chunk.");
    STAM_REL_REG(pVM, &pPGM->cUnmappedChunks,     STAMTYPE_U32, "/PGM/ChunkR3Map/Unmapped",      STAMUNIT_COUNT, "Number of times we unmapped a chunk.");
+#endif
 
    STAM_REL_REG(pVM, &pPGM->StatLargePageReused, STAMTYPE_COUNTER, "/PGM/LargePage/Reused", STAMUNIT_OCCURENCES, "The number of times we've reused a large page.");
…
    return Args.cErrors == 0 ? VINF_SUCCESS : VERR_INTERNAL_ERROR;
 }
+
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r107177 → r107194

        for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
        {
-           Assert(PGM_PAGE_GET_HCPHYS(pRamPage) == UINT64_C(0x0000fffffffff000));
+           Assert(PGM_PAGE_GET_HCPHYS(pRamPage) == UINT64_C(0x0000fffffffff000) || PGM_PAGE_GET_HCPHYS(pRamPage) == 0);
            Assert(PGM_PAGE_GET_PAGEID(pRamPage) == NIL_GMM_PAGEID);
            Assert(PGM_PAGE_GET_STATE(pRamPage) == PGM_PAGE_STATE_ALLOCATED);
…
        for (uint32_t iPage = 0; iPage < cGuestPages; iPage++, pRamPage++, pRomPage++)
        {
-           Assert(PGM_PAGE_GET_HCPHYS(pRamPage) == UINT64_C(0x0000fffffffff000));
+           Assert(PGM_PAGE_GET_HCPHYS(pRamPage) == UINT64_C(0x0000fffffffff000) || PGM_PAGE_GET_HCPHYS(pRamPage) == 0);
            Assert(PGM_PAGE_GET_PAGEID(pRamPage) == NIL_GMM_PAGEID);
            Assert(PGM_PAGE_GET_STATE(pRamPage) == PGM_PAGE_STATE_ALLOCATED);
…
 *   Chunk Mappings and Page Allocation                                                                                           *
 *********************************************************************************************************************************/
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
 
 /**
…
        return 0;
    }
-#ifdef VBOX_STRICT
+# ifdef VBOX_STRICT
    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
    {
…
        Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk != pChunk->Core.Key);
    }
-#endif
-
-#if 0 /* This is too much work with the PGMCPU::PhysTlb as well.  We flush them all instead. */
+# endif
+
+# if 0 /* This is too much work with the PGMCPU::PhysTlb as well.  We flush them all instead. */
    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
        if (pVM->pgm.s.PhysTlbR3.aEntries[i].pMap == pChunk)
            return 0;
-#endif
+# endif
 
    pArg->pChunk = pChunk;
…
        AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
        Assert(pVM->pgm.s.cHandyPages > 0);
-#ifdef VBOX_STRICT
+# ifdef VBOX_STRICT
        uint32_t i;
        for (i = iClear; i < pVM->pgm.s.cHandyPages; i++)
…
            RTAssertPanic();
        }
-#endif
+# endif
    }
    else
…
 }
 
+#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
+
 
 /*********************************************************************************************************************************
trunk/src/VBox/VMM/VMMR3/STAM.cpp
r106308 → r107194

 static char **              stamR3SplitPattern(const char *pszPat, unsigned *pcExpressions, char **ppszCopy);
 static int                  stamR3EnumU(PUVM pUVM, const char *pszPat, bool fUpdateRing0, int (pfnCallback)(PSTAMDESC pDesc, void *pvArg), void *pvArg);
+#ifdef VBOX_WITH_R0_MODULES
 static void                 stamR3Ring0StatsRegisterU(PUVM pUVM);
+#endif
 
 #ifdef VBOX_WITH_DEBUGGER
…
+#ifdef VBOX_WITH_R0_MODULES
 /**
  * The GVMM mapping records - sans the host cpus.
…
 
+# ifndef VBOX_WITH_MINIMAL_R0
 /**
  * The GMM mapping records.
…
    { RT_UOFFSETOF(GMMSTATS, VMStats.fMayAllocate),      STAMTYPE_BOOL, STAMUNIT_NONE, "/GMM/VM/fMayAllocate",    "Whether the VM is allowed to allocate memory or not." },
 };
+# endif /* !VBOX_WITH_MINIMAL_R0 */
+#endif /* VBOX_WITH_R0_MODULES */
…
    pUVM->stam.s.pRoot = pRoot;
 
+#ifdef VBOX_WITH_R0_MODULES
    /*
     * Register the ring-0 statistics (GVMM/GMM).
…
    if (!SUPR3IsDriverless())
        stamR3Ring0StatsRegisterU(pUVM);
+#endif
 
 #ifdef VBOX_WITH_DEBUGGER
…
    int rc = VINF_SUCCESS;
 
-   /* ring-0 */
+#ifdef VBOX_WITH_R0_MODULES
+   /*
+    * Check if the reset patterns cover anything related to ring-0.
+    */
    GVMMRESETSTATISTICSSREQ GVMMReq;
+   bool fGVMMMatched = (!pszPat || !*pszPat) && !SUPR3IsDriverless();
+# ifndef VBOX_WITH_MINIMAL_R0
    GMMRESETSTATISTICSSREQ  GMMReq;
-   bool fGVMMMatched = (!pszPat || !*pszPat) && !SUPR3IsDriverless();
    bool fGMMMatched  = fGVMMMatched;
+# endif
    if (fGVMMMatched)
    {
        memset(&GVMMReq.Stats, 0xff, sizeof(GVMMReq.Stats));
+# ifndef VBOX_WITH_MINIMAL_R0
        memset(&GMMReq.Stats, 0xff, sizeof(GMMReq.Stats));
+# endif
    }
    else
…
        }
 
+# ifndef VBOX_WITH_MINIMAL_R0
        /* GMM */
        RT_ZERO(GMMReq.Stats);
…
            fGMMMatched = true;
        }
+# endif
 
        RTMemTmpFree(papszExpressions);
        RTStrFree(pszCopy);
    }
+#endif /* !VBOX_WITH_R0_MODULES */
+
+
+   /*
+    * Grab the lock and do the resetting.
+    */
    STAM_LOCK_WR(pUVM);
 
+#ifdef VBOX_WITH_R0_MODULES
+   /* Reset ring-0 stats first. */
    if (fGVMMMatched)
    {
…
    }
 
+# ifndef VBOX_WITH_MINIMAL_R0
    if (fGMMMatched)
    {
…
        rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_RESET_STATISTICS, 0, &GMMReq.Hdr);
    }
+# endif
+#endif
 
    /* and the reset */
…
    switch (iRefreshGroup)
    {
+#ifdef VBOX_WITH_R0_MODULES
        /*
         * GVMM
…
        }
 
+# ifndef VBOX_WITH_MINIMAL_R0
        /*
         * GMM
…
            SUPR3CallVMMR0(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_NEM_UPDATE_STATISTICS, NULL);
            break;
+# endif
+#endif /* VBOX_WITH_R0_MODULES */
 
        default:
…
 
 
+#ifdef VBOX_WITH_R0_MODULES
 /**
  * Registers the ring-0 statistics.
…
    pUVM->stam.s.cRegisteredHostCpus = 0;
 
+# ifndef VBOX_WITH_MINIMAL_R0
    /* GMM */
    for (unsigned i = 0; i < RT_ELEMENTS(g_aGMMStats); i++)
…
                       g_aGMMStats[i].enmType, STAMVISIBILITY_ALWAYS, g_aGMMStats[i].pszName,
                       g_aGMMStats[i].enmUnit, g_aGMMStats[i].pszDesc, STAM_REFRESH_GRP_GMM);
-}
+# endif
+}
+#endif /* VBOX_WITH_R0_MODULES */
trunk/src/VBox/VMM/VMMR3/TM.cpp
r106975 → r107194

     */
    int rc;
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!SUPR3IsDriverless())
    {
…
    }
    else
+#endif
    {
        AssertReturn(cNewTimers <= _32K && cOldEntries <= _32K, VERR_TM_TOO_MANY_TIMERS);
trunk/src/VBox/VMM/VMMR3/VM.cpp
r107113 → r107194

 
 
+#ifdef VBOX_WITH_R0_MODULES
    /*
     * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
…
        }
    }
+#endif
 
    /*
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r107113 → r107194

        return rc;
 
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    /*
     * Register the Ring-0 VM handle with the session for fast ioctl calls.
     */
-   bool const fDriverless = SUPR3IsDriverless();
-   if (!fDriverless)
+   if (!SUPR3IsDriverless())
    {
        rc = SUPR3SetVMForFastIOCtl(VMCC_GET_VMR0_FOR_CALL(pVM));
…
            return rc;
    }
+#endif
 
 #ifdef VBOX_WITH_NMI
…
     * Start the log flusher thread.
     */
-   if (!fDriverless)
+   if (!SUPR3IsDriverless())
        rc = RTThreadCreate(&pVM->vmm.s.hLogFlusherThread, vmmR3LogFlusher, pVM, 0 /*cbStack*/,
                            RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "R0LogWrk");
…
     * Statistics.
     */
+#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    STAM_REG(pVM, &pVM->vmm.s.StatRunGC, STAMTYPE_COUNTER, "/VMM/RunGC", STAMUNIT_OCCURENCES, "Number of context switches.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
…
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
+#endif
 
    STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-Flushes", STAMUNIT_OCCURENCES, "Total number of buffer flushes");
…
 #endif
 
+#ifdef VBOX_WITH_HWVIRT
 
 /**
…
 VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu)
 {
-#if defined(VBOX_VMM_TARGET_ARMV8)
+# if defined(VBOX_VMM_TARGET_ARMV8)
    /* We should actually never get here as the only execution engine is NEM. */
    RT_NOREF(pVM, pVCpu);
    AssertReleaseFailed();
    return VERR_NOT_SUPPORTED;
-#else
+# else
    Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
 
    int rc;
    do
    {
-#ifdef NO_SUPCALLR0VMM
+#  ifdef NO_SUPCALLR0VMM
        rc = VERR_GENERAL_FAILURE;
-#else
+#  else
        rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
        if (RT_LIKELY(rc == VINF_SUCCESS))
            rc = pVCpu->vmm.s.iLastGZRc;
-#endif
+#  endif
    } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
 
-#if 0 /** @todo triggers too often */
+#  if 0 /** @todo triggers too often */
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
-#endif
+#  endif
 
    /*
…
    }
    return vmmR3HandleRing0Assert(pVM, pVCpu);
-#endif
-}
-
-
-/**
- * Perform one of the fast I/O control VMMR0 operation.
- *
- * @returns VBox strict status code.
- * @param   pVM           The cross context VM structure.
- * @param   pVCpu         The cross context virtual CPU structure.
- * @param   enmOperation  The operation to perform.
- */
-VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation)
-{
-   VBOXSTRICTRC rcStrict;
-   do
-   {
-#ifdef NO_SUPCALLR0VMM
-       rcStrict = VERR_GENERAL_FAILURE;
-#else
-       rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu);
-       if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-           rcStrict = pVCpu->vmm.s.iLastGZRc;
-#endif
-   } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
-
-   /*
-    * Flush the logs
-    */
-#ifdef LOG_ENABLED
-   VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
-   VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
-   if (rcStrict != VERR_VMM_RING0_ASSERTION)
-       return rcStrict;
-   return vmmR3HandleRing0Assert(pVM, pVCpu);
-}
+# endif
+}
+#endif /* VBOX_WITH_HWVIRT */
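VMMR3HmRunGC keeps the usual ring-0 fast-call loop: enter ring-0, run the guest until something needs ring-3, and re-enter immediately when the exit reason was only a host interrupt. A stripped-down sketch of that loop shape; the status constants and call functions below are stand-ins, not the SUP/VMM APIs:

    enum { MY_OK = 0, MY_RAW_INTERRUPT_HYPER = 1 };   /* stand-in status codes */

    extern int myCallRing0Fast(unsigned idCpu);   /* stands in for SUPR3CallVMMR0Fast() */
    extern int myLastRing0Status(unsigned idCpu); /* stands in for iLastGZRc pickup     */

    static int myRunGuestInRing0(unsigned idCpu)
    {
        int rc;
        do
        {
            rc = myCallRing0Fast(idCpu);        /* world switch + guest execution */
            if (rc == MY_OK)
                rc = myLastRing0Status(idCpu);  /* fetch the real exit status */
        } while (rc == MY_RAW_INTERRUPT_HYPER); /* host interrupt: just re-enter */
        return rc;                              /* anything else goes to ring-3 */
    }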
trunk/src/VBox/VMM/include/PGMInternal.h
r107171 → r107194

    PDMCRITSECT                     CritSectX;
 
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    /**
     * Data associated with managing the ring-3 mappings of the allocation chunks.
…
        /** The chunk tree, ordered by chunk id. */
        R3PTRTYPE(PAVLU32NODECORE)  pTree;
-#if HC_ARCH_BITS == 32
+# if HC_ARCH_BITS == 32
        uint32_t                    u32Alignment0;
-#endif
+# endif
        /** The number of mapped chunks. */
        uint32_t                    c;
…
        uint32_t                    au32Alignment1[3];
    } ChunkR3Map;
+#endif
 
    /** The page mapping TLB for ring-3. */
…
    uint32_t                        cReadLockedPages;    /**< The number of read locked pages. */
    uint32_t                        cBalloonedPages;     /**< The number of ballooned pages. */
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    uint32_t                        cMappedChunks;       /**< Number of times we mapped a chunk. */
    uint32_t                        cUnmappedChunks;     /**< Number of times we unmapped a chunk. */
+#endif
    uint32_t                        cLargePages;         /**< The number of large pages. */
    uint32_t                        cLargePagesDisabled; /**< The number of disabled large pages. */
…
 AssertCompileMemberAlignment(PGM, CritSectX, 32);
 AssertCompileMemberAlignment(PGM, CritSectX, 64);
-AssertCompileMemberAlignment(PGM, ChunkR3Map, 16);
 AssertCompileMemberAlignment(PGM, PhysTlbR3, 8);
 AssertCompileMemberAlignment(PGM, PhysTlbR3, 16);
…
 AssertCompileMemberAlignment(PGM, PhysTlbR0, 32);
 # ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
+AssertCompileMemberAlignment(PGM, ChunkR3Map, 16);
 AssertCompileMemberAlignment(PGM, HCPhysZeroPg, 8);
 # endif
…
 int             pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
 int             pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv);
+#if 0 /* unused */
 int             pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
+#endif
 int             pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr);
 int             pgmPhysGCPhys2CCPtrLockless(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv);