Changeset 38953 in vbox for trunk/src/VBox/VMM/VMMR3
Timestamp: Oct 6, 2011 8:49:36 AM
Location:  trunk/src/VBox/VMM/VMMR3
Files:     4 edited
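Summary (inferred from the diffs below, the changeset carries no message here): the PGM critical section member is renamed from CritSect to CritSectX, presumably to flush out any remaining direct users; the internal mappers pgmPhysGCPhys2CCPtrInternal and pgmPhysGCPhys2CCPtrInternalReadOnly gain a PGMPAGEMAPLOCK out-parameter, and every successful mapping is now paired with a pgmPhysReleaseInternalPageMappingLock call; and the chunk-unmap fallback in PGMPhys.cpp runs synchronously while the VM is saving or loading state instead of always queueing an asynchronous request.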
trunk/src/VBox/VMM/VMMR3/PGM.cpp
--- trunk/src/VBox/VMM/VMMR3/PGM.cpp (r38712)
+++ trunk/src/VBox/VMM/VMMR3/PGM.cpp (r38953)

@@ -1186,5 +1186,5 @@
     AssertCompile(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
     AssertCompile(sizeof(pVM->aCpus[0].pgm.s) <= sizeof(pVM->aCpus[0].pgm.padding));
-    AssertCompileMemberAlignment(PGM, CritSect, sizeof(uintptr_t));
+    AssertCompileMemberAlignment(PGM, CritSectX, sizeof(uintptr_t));
 
     /*
@@ -1342,5 +1342,5 @@
      * Initialize the PGM critical section and flush the phys TLBs
      */
-    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSect, RT_SRC_POS, "PGM");
+    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSectX, RT_SRC_POS, "PGM");
     AssertRCReturn(rc, rc);
 
@@ -1454,5 +1454,5 @@
 
     /* Almost no cleanup necessary, MM frees all memory. */
-    PDMR3CritSectDelete(&pVM->pgm.s.CritSect);
+    PDMR3CritSectDelete(&pVM->pgm.s.CritSectX);
 
     return rc;
@@ -2578,5 +2578,5 @@
 
     PGMDeregisterStringFormatTypes();
-    return PDMR3CritSectDelete(&pVM->pgm.s.CritSect);
+    return PDMR3CritSectDelete(&pVM->pgm.s.CritSectX);
 }
 
@@ -2689,7 +2689,7 @@
      * Get page directory addresses.
      */
+    pgmLock(pVM);
     PX86PD pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
     Assert(pPDSrc);
-    Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
 
     /*
@@ -2715,4 +2715,5 @@
         }
     }
+    pgmUnlock(pVM);
 }
@@ -2726,5 +2727,5 @@
 VMMR3DECL(int) PGMR3LockCall(PVM pVM)
 {
-    int rc = PDMR3CritSectEnterEx(&pVM->pgm.s.CritSect, true /* fHostCall */);
+    int rc = PDMR3CritSectEnterEx(&pVM->pgm.s.CritSectX, true /* fHostCall */);
     AssertRC(rc);
     return rc;
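The hunks at lines 2689 and 2715 replace a mapping assertion with real locking around the 32-bit guest page-directory walk. A minimal sketch of the resulting shape (names as in the hunks above; the walk body itself is elided):

    /* Sketch only -- the PGM lock now brackets the guest PD access. */
    pgmLock(pVM);                                   /* enter the PGM critical section   */
    PX86PD pPDSrc = pgmGstGet32bitPDPtr(pVCpu);     /* safe to dereference while locked */
    Assert(pPDSrc);
    /* ... walk the page directory entries ... */
    pgmUnlock(pVM);                                 /* release before returning         */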
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
--- trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp (r38838)
+++ trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp (r38953)

@@ -150,11 +150,15 @@
              * Simple stuff, go ahead.
              */
-            size_t cb     = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
+            size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
             if (cb > cbRead)
                 cb = cbRead;
-            const void *pvSrc;
-            int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
+            PGMPAGEMAPLOCK PgMpLck;
+            const void    *pvSrc;
+            int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
             if (RT_SUCCESS(rc))
+            {
                 memcpy(pvBuf, pvSrc, cb);
+                pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+            }
             else
             {
@@ -290,11 +294,15 @@
              * Simple stuff, go ahead.
              */
-            size_t cb     = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
+            size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
             if (cb > cbWrite)
                 cb = cbWrite;
-            void *pvDst;
-            int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
+            PGMPAGEMAPLOCK PgMpLck;
+            void          *pvDst;
+            int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
             if (RT_SUCCESS(rc))
+            {
                 memcpy(pvDst, pvBuf, cb);
+                pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+            }
             else
                 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", …
@@ -3996,10 +4004,42 @@
         pVM->pgm.s.cMappedChunks++;
 
-        /* If we're running out of virtual address space, then we should unmap another chunk. */
+        /*
+         * If we're running out of virtual address space, then we should
+         * unmap another chunk.
+         *
+         * Currently, an unmap operation requires that all other virtual CPUs
+         * are idling and not by chance making use of the memory we're
+         * unmapping.  So, we create an async unmap operation here.
+         *
+         * Now, when creating or restoring a saved state this won't work very
+         * well since we may want to restore all guest RAM + a little something.
+         * So, we have to do the unmap synchronously.  Fortunately for us
+         * though, during these operations the other virtual CPUs are inactive
+         * and it should be safe to do this.
+         */
+        /** @todo Eventually we should lock all memory when used and do
+         *        map+unmap as one kernel call without any rendezvous or
+         *        other precautions. */
         if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
         {
-            /* Postpone the unmap operation (which requires a rendezvous operation) as we own the PGM lock here. */
-            rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
-            AssertRC(rc);
+            switch (VMR3GetState(pVM))
+            {
+                case VMSTATE_LOADING:
+                case VMSTATE_SAVING:
+                {
+                    PVMCPU pVCpu = VMMGetCpu(pVM);
+                    if (   pVCpu
+                        && pVM->pgm.s.cDeprecatedPageLocks == 0)
+                    {
+                        pgmR3PhysUnmapChunkRendezvous(pVM, pVCpu, NULL);
+                        break;
+                    }
+                    /* fall thru */
+                }
+                default:
+                    rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
+                    AssertRC(rc);
+                    break;
+            }
         }
     }
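The first two hunks show the pattern this changeset applies throughout: the internal mappers take a PGMPAGEMAPLOCK out-parameter, and each successful mapping is released with pgmPhysReleaseInternalPageMappingLock once the caller is done with the page. A minimal sketch of the discipline on the read path (signatures as in the hunks; error handling elided):

    PGMPAGEMAPLOCK PgMpLck;
    const void    *pvSrc;
    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
    if (RT_SUCCESS(rc))
    {
        memcpy(pvBuf, pvSrc, cb);                             /* use the mapping...  */
        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck); /* ...then release it  */
    }

Presumably these explicit locks are what the synchronous unmap path in the third hunk relies on: the switch only unmaps in place during VMSTATE_LOADING/VMSTATE_SAVING when cDeprecatedPageLocks is zero, i.e. when no legacy caller may still be holding an untracked mapping.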
trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp
--- trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp (r38707)
+++ trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp (r38953)

@@ -1243,9 +1243,13 @@
 static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
 {
-    RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
-    void const *pvPage;
-    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
+    RTGCPHYS       GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
+    PGMPAGEMAPLOCK PgMpLck;
+    void const    *pvPage;
+    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
     if (RT_SUCCESS(rc))
+    {
         paLSPages[iPage].u32Crc = RTCrc32(pvPage, PAGE_SIZE);
+        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+    }
     else
         paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
@@ -1290,9 +1294,13 @@
         if (paLSPages[iPage].u32Crc != UINT32_MAX)
         {
-            RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
-            void const *pvPage;
-            int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
+            RTGCPHYS       GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
+            PGMPAGEMAPLOCK PgMpLck;
+            void const    *pvPage;
+            int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
             if (RT_SUCCESS(rc))
+            {
                 pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
+                pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+            }
         }
     }
@@ -1333,5 +1341,5 @@
             && (iPage & 0x7ff) == 0x100
 #endif
-            && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
+            && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
             && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
         {
@@ -1558,5 +1566,5 @@
         if (   uPass != SSM_PASS_FINAL
             && (iPage & 0x7ff) == 0x100
-            && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
+            && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
             && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
         {
@@ -1622,7 +1630,8 @@
                      * SSM call may block).
                      */
-                    uint8_t abPage[PAGE_SIZE];
-                    void const *pvPage;
-                    rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage);
+                    uint8_t        abPage[PAGE_SIZE];
+                    PGMPAGEMAPLOCK PgMpLck;
+                    void const    *pvPage;
+                    rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
                     if (RT_SUCCESS(rc))
                     {
@@ -1632,4 +1641,5 @@
                         pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
 #endif
+                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                     }
                     pgmUnlock(pVM);
@@ -2231,8 +2241,12 @@
              * Load the page.
              */
-            void *pvPage;
-            int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage);
+            PGMPAGEMAPLOCK PgMpLck;
+            void          *pvPage;
+            int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
             if (RT_SUCCESS(rc))
+            {
                 rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
+                pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
+            }
 
             return rc;
@@ -2677,8 +2691,11 @@
                 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW)
             {
-                void *pvDstPage;
-                rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
+                PGMPAGEMAPLOCK PgMpLck;
+                void          *pvDstPage;
+                rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
                 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
+
                 ASMMemZeroPage(pvDstPage);
+                pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
             }
             /* Free it only if it's not part of a previously
@@ -2719,8 +2736,10 @@
         case PGM_STATE_REC_RAM_RAW:
         {
-            void *pvDstPage;
-            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
+            PGMPAGEMAPLOCK PgMpLck;
+            void          *pvDstPage;
+            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
             AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
             rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
+            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
             if (RT_FAILURE(rc))
                 return rc;
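Besides the mapping-lock changes, two hunks merely rename the critical section inside the yield condition used by the live-save scans, but the condition's shape is worth spelling out. A sketch (assuming the surrounding per-page loop, with idRamRangesGen captured before the loop starts):

    /* Sketch: cooperative yield inside a long live-save page scan. */
    if (   uPass != SSM_PASS_FINAL                     /* never yield on the final pass */
        && (iPage & 0x7ff) == 0x100                    /* at most once every 2048 pages */
        && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)   /* true if the lock was given up */
        && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
    {
        /* The RAM range list changed while we yielded; restart the scan (elided). */
    }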
trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
--- trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp (r36891)
+++ trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp (r38953)

@@ -342,18 +342,17 @@
             case PGM_PAGE_STATE_WRITE_MONITORED:
             {
-                const void *pvPage;
                 /* Check if the page was allocated, but completely zero. */
-                int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage);
-                if (   rc == VINF_SUCCESS
+                PGMPAGEMAPLOCK PgMpLck;
+                const void    *pvPage;
+                int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
+                if (   RT_SUCCESS(rc)
                     && ASMMemIsZeroPage(pvPage))
-                {
                     cAllocZero++;
-                }
-                else
-                if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
+                else if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
                     cDuplicate++;
                 else
                     cUnique++;
-
+                if (RT_SUCCESS(rc))
+                    pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                 break;
             }
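The rewritten statistics walk buckets each write-monitored page as zero, duplicate, or unique, and releases the mapping lock before falling out of the case. A condensed sketch with the names from the hunk (counters are assumed to be locals of the surrounding walk):

    /* Condensed sketch of the r38953 classification flow shown above. */
    PGMPAGEMAPLOCK PgMpLck;
    const void    *pvPage;
    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
    if (RT_SUCCESS(rc) && ASMMemIsZeroPage(pvPage))
        cAllocZero++;                   /* allocated, but still all zeros           */
    else if (GMMR3IsDuplicatePage(pVM, PGM_PAGE_GET_PAGEID(pPage)))
        cDuplicate++;                   /* identical content exists in another page */
    else
        cUnique++;
    if (RT_SUCCESS(rc))                 /* only release when the mapping succeeded  */
        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);

Note that the guard also widens from rc == VINF_SUCCESS to RT_SUCCESS(rc), so informational success statuses now take the zero-page test as well.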