Changeset 14374 in vbox
- Timestamp:
- Nov 19, 2008 7:13:00 PM
- Location:
- trunk/src/VBox/VMM
- Files:
- 2 edited
trunk/src/VBox/VMM/PGMInternal.h (r14301 → r14374)

     RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];

+    /** The address of the ring-0 mapping cache if we're making use of it. */
+    RTR0PTR pvR0DynMapUsed;
+#if HC_ARCH_BITS == 32
+    RTR0PTR R0PtrPadding0; /**< Alignment. */
+#endif
+
     /** 4 MB page mask; 32 or 36 bits depending on PSE-36 */
     RTGCPHYS GCPhys4MBPSEMask;
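The interesting bit in this hunk is the R0PtrPadding0 member: on a 32-bit host an RTR0PTR is only 4 bytes, so the extra pointer keeps the structure layout, and the offsets of every member that follows, identical on 32-bit and 64-bit hosts. A minimal standalone sketch of the idea; RTR0PTR_SKETCH and PGMSKETCH are hypothetical stand-ins, not VBox definitions:

    /* Sketch only: illustrates why the 32-bit-only padding member keeps
       the member block the same size on both host widths. */
    #include <assert.h>
    #include <stdint.h>

    #if UINTPTR_MAX == UINT32_MAX   /* 32-bit host, like HC_ARCH_BITS == 32 */
    typedef uint32_t RTR0PTR_SKETCH;
    # define NEED_PADDING 1
    #else                           /* 64-bit host */
    typedef uint64_t RTR0PTR_SKETCH;
    # define NEED_PADDING 0
    #endif

    typedef struct PGMSKETCH
    {
        RTR0PTR_SKETCH pvR0DynMapUsed;
    #if NEED_PADDING
        RTR0PTR_SKETCH R0PtrPadding0;  /* pads the 4-byte pointer out to 8 bytes */
    #endif
    } PGMSKETCH;

    int main(void)
    {
        /* With the padding, the block is 8 bytes on both host widths,
           so the offsets of everything after it stay the same. */
        assert(sizeof(PGMSKETCH) == 8);
        return 0;
    }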
trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp (r14362 → r14374)

 #include "../PGMInternal.h"
 #include <VBox/vm.h>
+#include <VBox/sup.h>
 #include <VBox/err.h>
 #include <iprt/asm.h>
+#include <iprt/alloc.h>
 #include <iprt/assert.h>
 #include <iprt/cpuset.h>
 #include <iprt/spinlock.h>
+#include <iprt/semaphore.h>
…
 typedef struct PGMR0DYNMAP
 {
-    /** The usual magic number / eye catcher. */
+    /** The usual magic number / eye catcher (PGMR0DYNMAP_MAGIC). */
     uint32_t u32Magic;
     /** Spinlock serializing the normal operation of the cache. */
…
 typedef PGMR0DYNMAP *PPGMR0DYNMAP;

+/** PGMR0DYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
+#define PGMR0DYNMAP_MAGIC   0x19640201
+

 /*******************************************************************************
…

+/*******************************************************************************
+*   Internal Functions                                                         *
+*******************************************************************************/
+static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs);
+static int  pgmR0DynMapSetup(PPGMR0DYNMAP pThis);
+static int  pgmR0DynMapGrow(PPGMR0DYNMAP pThis);
+static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis);

…
 VMMR0DECL(int) PGMR0DynMapInit(void)
 {
-    return VINF_SUCCESS;
+    Assert(!g_pPGMR0DynMap);
+
+    /*
+     * Create and initialize the cache instance.
+     */
+    PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)RTMemAllocZ(sizeof(*pThis));
+    AssertLogRelReturn(pThis, VERR_NO_MEMORY);
+    int           rc      = VINF_SUCCESS;
+    SUPPAGINGMODE enmMode = SUPR0GetPagingMode();
+    switch (enmMode)
+    {
+        case SUPPAGINGMODE_32_BIT:
+        case SUPPAGINGMODE_32_BIT_GLOBAL:
+            pThis->fLegacyMode = true;
+            break;
+        case SUPPAGINGMODE_PAE:
+        case SUPPAGINGMODE_PAE_GLOBAL:
+        case SUPPAGINGMODE_PAE_NX:
+        case SUPPAGINGMODE_PAE_GLOBAL_NX:
+        case SUPPAGINGMODE_AMD64:
+        case SUPPAGINGMODE_AMD64_GLOBAL:
+        case SUPPAGINGMODE_AMD64_NX:
+        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
+            pThis->fLegacyMode = false;
+            break;
+        default:
+            rc = VERR_INTERNAL_ERROR;
+            break;
+    }
+    if (RT_SUCCESS(rc))
+    {
+        rc = RTSemFastMutexCreate(&pThis->hInitLock);
+        if (RT_SUCCESS(rc))
+        {
+            rc = RTSpinlockCreate(&pThis->hSpinlock);
+            if (RT_SUCCESS(rc))
+            {
+                pThis->u32Magic = PGMR0DYNMAP_MAGIC;
+                g_pPGMR0DynMap  = pThis;
+                return VINF_SUCCESS;
+            }
+            RTSemFastMutexDestroy(pThis->hInitLock);
+        }
+    }
+    RTMemFree(pThis);
+    return rc;
 }
…
 VMMR0DECL(void) PGMR0DynMapTerm(void)
 {
+    /*
+     * Destroy the cache.
+     *
+     * There are not supposed to be any races here; the loader should
+     * make sure of that. So, don't bother locking anything.
+     *
+     * The VM objects should all be destroyed by now, so there are no
+     * dangling users or anything like that to clean up. This routine
+     * is just a mirror image of PGMR0DynMapInit.
+     */
+    PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
+    if (pThis)
+    {
+        AssertPtr(pThis);
+        g_pPGMR0DynMap = NULL;
+
+        AssertLogRelMsg(!pThis->cUsers && !pThis->paPages && !pThis->cPages,
+                        ("cUsers=%d paPages=%p cPages=%#x\n",
+                         pThis->cUsers, pThis->paPages, pThis->cPages));
+
+        /* Free the associated resources. */
+        RTSemFastMutexDestroy(pThis->hInitLock);
+        pThis->hInitLock = NIL_RTSEMFASTMUTEX;
+        RTSpinlockDestroy(pThis->hSpinlock);
+        pThis->hSpinlock = NIL_RTSPINLOCK;
+        pThis->u32Magic  = UINT32_MAX;
+        RTMemFree(pThis);
+    }
 }
…
 VMMR0DECL(int) PGMR0DynMapInitVM(PVM pVM)
 {
-    NOREF(pVM);
+    /*
+     * Initialize the auto sets.
+     */
+    VMCPUID idCpu = pVM->cCPUs;
+    while (idCpu-- > 0)
+    {
+        PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
+        uint32_t j = RT_ELEMENTS(pSet->aEntries);
+        while (j-- > 0)
+        {
+            pSet->aEntries[j].iPage = UINT16_MAX;
+            pSet->aEntries[j].cRefs = 0;
+        }
+        pSet->cEntries = PGMMAPSET_CLOSED;
+    }
+
+    /*
+     * Do we need the cache? Skip the last bit if we don't.
+     */
+    Assert(!pVM->pgm.s.pvR0DynMapUsed);
+    pVM->pgm.s.pvR0DynMapUsed = NULL;
+    if (!HWACCMIsEnabled(pVM))
+        return VINF_SUCCESS;
+
+    /*
+     * Reference and if necessary setup or grow the cache.
+     */
+    PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
+    AssertPtrReturn(pThis, VERR_INTERNAL_ERROR);
+    int rc = RTSemFastMutexRequest(pThis->hInitLock);
+    AssertLogRelRCReturn(rc, rc);
+
+    pThis->cUsers++;
+    if (pThis->cUsers == 1)
+        rc = pgmR0DynMapSetup(pThis);
+    else if (pThis->cMaxLoad > pThis->cPages / 2)
+        rc = pgmR0DynMapGrow(pThis);
+    if (RT_FAILURE(rc))
+        pThis->cUsers--;
+
+    RTSemFastMutexRelease(pThis->hInitLock);
+
+    return rc;
+}
+
+
+/**
+ * Terminates the dynamic mapping cache usage for a VM.
+ *
+ * @param   pVM     Pointer to the shared VM structure.
+ */
+VMMR0DECL(void) PGMR0DynMapTermVM(PVM pVM)
+{
+    /*
+     * Return immediately if we're not using the cache.
+     */
+    if (!pVM->pgm.s.pvR0DynMapUsed)
+        return;
+
+    PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
+    AssertPtrReturnVoid(pThis);
+
+    int rc = RTSemFastMutexRequest(pThis->hInitLock);
+    AssertLogRelRCReturnVoid(rc);
+
+    if (pVM->pgm.s.pvR0DynMapUsed == pThis)
+    {
+        pVM->pgm.s.pvR0DynMapUsed = NULL;
+
+        /*
+         * Clean up and check the auto sets.
+         */
+        VMCPUID idCpu = pVM->cCPUs;
+        while (idCpu-- > 0)
+        {
+            PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
+            uint32_t j = pSet->cEntries;
+            if (j <= RT_ELEMENTS(pSet->aEntries))
+            {
+                /*
+                 * The set is open, close it.
+                 */
+                while (j-- > 0)
+                {
+                    int32_t  cRefs = pSet->aEntries[j].cRefs;
+                    uint32_t iPage = pSet->aEntries[j].iPage;
+                    LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage));
+                    if (iPage < pThis->cPages && cRefs > 0)
+                        pgmR0DynMapReleasePage(pThis, iPage, cRefs);
+                    else
+                        AssertMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));
+
+                    pSet->aEntries[j].iPage = UINT16_MAX;
+                    pSet->aEntries[j].cRefs = 0;
+                }
+                pSet->cEntries = PGMMAPSET_CLOSED;
+            }
+
+            /* Paranoia: every entry should be reset at this point. */
+            j = RT_ELEMENTS(pSet->aEntries);
+            while (j-- > 0)
+            {
+                Assert(pSet->aEntries[j].iPage == UINT16_MAX);
+                Assert(!pSet->aEntries[j].cRefs);
+            }
+        }
+
+        /*
+         * Release our reference to the mapping cache.
+         */
+        Assert(pThis->cUsers > 0);
+        pThis->cUsers--;
+        if (!pThis->cUsers)
+            pgmR0DynMapTearDown(pThis);
+    }
+    else
+        AssertMsgFailed(("pvR0DynMapUsed=%p pThis=%p\n", pVM->pgm.s.pvR0DynMapUsed, pThis));
+
+    RTSemFastMutexRelease(pThis->hInitLock);
+}
+
+
+/**
+ * Called by PGMR0DynMapInitVM under the init lock.
+ *
+ * @returns VBox status code.
+ * @param   pThis   The dynamic mapping cache instance.
+ */
+static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis)
+{
     return VINF_SUCCESS;
 }
…
 /**
- * Terminates the dynamic mapping cache usage for a VM.
- *
- * @param   pVM     Pointer to the shared VM structure.
- */
-VMMR0DECL(void) PGMR0DynMapTermVM(PVM pVM)
-{
-    NOREF(pVM);
+ * Called by PGMR0DynMapInitVM under the init lock.
+ *
+ * @returns VBox status code.
+ * @param   pThis   The dynamic mapping cache instance.
+ */
+static int pgmR0DynMapGrow(PPGMR0DYNMAP pThis)
+{
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Called by PGMR0DynMapTermVM under the init lock.
+ *
+ * @param   pThis   The dynamic mapping cache instance.
+ */
+static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis)
+{
 }
…
     RTCpuSetFill(&paPages[iFreePage].PendingSet);
     if (pThis->fLegacyMode)
-        paPages[iFreePage].uPte.pLegacy->u = (paPages[iFreePage].uPte.pLegacy->u & X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT)
-                                           | X86_PTE_P | X86_PTE_A | X86_PTE_D
-                                           | (HCPhys & X86_PTE_PG_MASK);
+    {
+        X86PGUINT uOld  = paPages[iFreePage].uPte.pLegacy->u;
+        X86PGUINT uOld2 = uOld; NOREF(uOld2);
+        X86PGUINT uNew  = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+                        | X86_PTE_P | X86_PTE_A | X86_PTE_D
+                        | (HCPhys & X86_PTE_PG_MASK);
+        while (!ASMAtomicCmpXchgExU32(&paPages[iFreePage].uPte.pLegacy->u, uNew, uOld, &uOld))
+            AssertMsgFailed(("uOld=%#x uOld2=%#x uNew=%#x\n", uOld, uOld2, uNew));
+    }
     else
-        paPages[iFreePage].uPte.pPae->u = (paPages[iFreePage].uPte.pPae->u & X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT)
-                                        | X86_PTE_P | X86_PTE_A | X86_PTE_D
-                                        | (HCPhys & X86_PTE_PAE_PG_MASK);
+    {
+        X86PGPAEUINT uOld  = paPages[iFreePage].uPte.pPae->u;
+        X86PGPAEUINT uOld2 = uOld; NOREF(uOld2);
+        X86PGPAEUINT uNew  = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+                           | X86_PTE_P | X86_PTE_A | X86_PTE_D
+                           | (HCPhys & X86_PTE_PAE_PG_MASK);
+        while (!ASMAtomicCmpXchgExU64(&paPages[iFreePage].uPte.pPae->u, uNew, uOld, &uOld))
+            AssertMsgFailed(("uOld=%#llx uOld2=%#llx uNew=%#llx\n", uOld, uOld2, uNew));
+    }
     return iFreePage;
 }
…
         Assert(cRefs > 0);
         pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
+
+        pSet->aEntries[i].iPage = UINT16_MAX;
+        pSet->aEntries[i].cRefs = 0;
     }
…
 VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
 {
+    /*
+     * Validate state.
+     */
     AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    AssertPtrReturn(pVCpu, VERR_INTERNAL_ERROR);
+    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
+    AssertMsgReturn(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries),
+                    ("%#x\n", pSet->cEntries), VERR_WRONG_ORDER);

     /*
…
      * If it's less than half full, don't bother looking for duplicates.
      */
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
     if (pSet->cEntries < RT_ELEMENTS(pSet->aEntries) / 2)
     {
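The most instructive change above is the switch from a plain PTE assignment to a compare-exchange loop: read the current entry, compute a new value that keeps the global bit (the real code also keeps the caching attribute bits) but points at the new physical page, and retry if another CPU changed the entry in between. Below is a standalone sketch of that pattern using C11 atomics; the X_PTE_* constants and the 32-bit-only types are simplified stand-ins for the real X86_PTE_* flags and the ASMAtomicCmpXchgEx* helpers, not VBox code:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X_PTE_P       0x001u        /* present */
    #define X_PTE_A       0x020u        /* accessed */
    #define X_PTE_D       0x040u        /* dirty */
    #define X_PTE_G       0x100u        /* global: the only old bit kept here */
    #define X_PTE_PG_MASK 0xfffff000u   /* page frame address mask */

    /* One fake page table entry shared between CPUs. */
    static _Atomic uint32_t g_Pte = X_PTE_G | 0x00001000u;

    static void SetPte(uint32_t HCPhys)
    {
        uint32_t uOld = atomic_load(&g_Pte);
        uint32_t uNew;
        do
        {
            /* Preserve the global bit, mark the entry present/accessed/dirty
               and point it at the new physical page. */
            uNew = (uOld & X_PTE_G)
                 | X_PTE_P | X_PTE_A | X_PTE_D
                 | (HCPhys & X_PTE_PG_MASK);
            /* On failure uOld is reloaded with the current value and we retry;
               the changeset asserts instead, since a race is unexpected there. */
        } while (!atomic_compare_exchange_weak(&g_Pte, &uOld, uNew));
    }

    int main(void)
    {
        SetPte(0x00042000u);
        printf("pte=%#x\n", (unsigned)atomic_load(&g_Pte));
        return 0;
    }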