Changeset 16182 in vbox
Timestamp: Jan 22, 2009, 5:30:28 PM
Location: trunk
Files: 4 edited
Legend: unchanged lines are shown without a prefix, added lines are prefixed with +, removed lines with -.
trunk/include/VBox/pgm.h
r15429 → r16182, @@ -541 +541 @@

  VMMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc);
  VMMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr);
+ VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM);
  VMMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb);
  VMMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);
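The new PGMR3FinalizeMappings entry point is documented below as being called once, at the end of ring-3 init, after the early mappings have been registered. The following sketch only illustrates the ordering that the assertions added in this changeset enforce; the helper exampleInitMappings, the trivial relocation callback and the include set are made up for illustration and are not part of the changeset.

#include <VBox/pgm.h>       /* hypothetical include set for this sketch */
#include <VBox/err.h>
#include <iprt/assert.h>

/* Relocation callback; the signature is inferred from how pfnRelocate is
 * invoked with PGMRELOCATECALL_SUGGEST/RELOCATE in this changeset.
 * Returning true accepts the suggested address. */
static DECLCALLBACK(bool) exampleRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
                                          PGMRELOCATECALL enmMode, void *pvUser)
{
    NOREF(pVM); NOREF(GCPtrOld); NOREF(GCPtrNew); NOREF(enmMode); NOREF(pvUser);
    return true;
}

/* Hypothetical init helper showing the ordering the new VERR_WRONG_ORDER checks enforce. */
static int exampleInitMappings(PVM pVM, RTGCPTR GCPtr)
{
    /* Mappings can be registered before finalization; with this changeset their
     * PDEs are only written into the intermediate context once finalized. */
    int rc = PGMR3MapPT(pVM, GCPtr, _4M, exampleRelocate, NULL, "example mapping");
    AssertRCReturn(rc, rc);

    /* Called exactly once at the end of ring-3 init; a second call (or a later
     * PGMR3MapIntermediate) now fails with VERR_WRONG_ORDER. */
    rc = PGMR3FinalizeMappings(pVM);
    AssertRCReturn(rc, rc);

    /* PGMR3UnmapPT is only legal after finalization (new assertion). */
    return PGMR3UnmapPT(pVM, GCPtr);
}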
trunk/src/VBox/VMM/PGMInternal.h
r16175 → r16182

@@ -371 +371 @@ (struct PGMMAPPING)
      /** Pointer to next entry. */
      RCPTRTYPE(struct PGMMAPPING *)  pNextRC;
- #if GC_ARCH_BITS == 64
-     RTRCPTR                         padding0;
- #endif
+     /** Indicate whether this entry is finalized. */
+     bool                            fFinalized;
      /** Start Virtual address. */
      RTGCPTR                         GCPtr;

@@ -387 +386 @@
      R3PTRTYPE(const char *)         pszDesc;
      /** Number of page tables. */
-     RTUINT                          cPTs;
+     uint32_t                        cPTs;
  #if HC_ARCH_BITS != GC_ARCH_BITS || GC_ARCH_BITS == 64
-     RTUINT                          uPadding1; /**< Alignment padding. */
+     uint32_t                        uPadding1; /**< Alignment padding. */
  #endif
      /** Array of page table mapping data. Each entry

@@ -2454 +2453 @@ (struct PGM)
      R0PTRTYPE(PPGMMAPPING)          pMappingsR0;

+     /** Indicates that PGMR3FinalizeMappings has been called and that further
+      * PGMR3MapIntermediate calls will be rejected. */
+     bool                            fFinalizedMappings;
      /** If set no conflict checks are required. (boolean) */
      bool                            fMappingsFixed;
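PGMMAPPING and PGM are shared between ring-3, ring-0 and the raw-mode context (note the R3PTRTYPE/R0PTRTYPE/RCPTRTYPE wrappers), so member offsets have to agree across compilations with different pointer widths; the switch to uint32_t makes the member width explicit and uPadding1 keeps the layout aligned when host and guest word sizes differ. A generic sketch of that concern, using an invented structure that is not a VBox type:

#include <stdint.h>
#include <stdbool.h>

/* Invented example: a structure whose layout must be identical when compiled
 * for a 64-bit host context and a 32-bit raw-mode context. */
typedef struct EXAMPLESHARED
{
    uint32_t offNextRC;     /* context-relative reference: 4 bytes everywhere */
    bool     fFinalized;    /* 1 byte */
    uint8_t  abPadding[3];  /* explicit padding so GCPtr starts at offset 8 in both contexts */
    uint64_t GCPtr;         /* fixed 8 bytes regardless of the host word size */
    uint32_t cPTs;          /* explicit width instead of a plain 'unsigned' */
    uint32_t uPadding1;     /* keeps sizeof() a multiple of 8 for arrays of this type */
} EXAMPLESHARED;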
trunk/src/VBox/VMM/PGMMap.cpp
r15410 → r16182

@@ -114 +114 @@
       */
      const unsigned iPageDir = GCPtr >> X86_PD_SHIFT;
-     const unsigned cPTs = cb >> X86_PD_SHIFT;
-     unsigned i;
-     for (i = 0; i < cPTs; i++)
-     {
-         if (pVM->pgm.s.pInterPD->a[iPageDir + i].n.u1Present)
-         {
-             AssertMsgFailed(("Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
-             LogRel(("VERR_PGM_MAPPING_CONFLICT: Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
-             return VERR_PGM_MAPPING_CONFLICT;
-         }
-     }
-     /** @todo AMD64: add check in PAE structures too, so we can remove all the 32-Bit paging stuff there. */
+     const unsigned cPTs = cb >> X86_PD_SHIFT;
+     if (pVM->pgm.s.fFinalizedMappings)
+     {
+         for (unsigned i = 0; i < cPTs; i++)
+             if (pVM->pgm.s.pInterPD->a[iPageDir + i].n.u1Present)
+             {
+                 AssertMsgFailed(("Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
+                 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
+                 return VERR_PGM_MAPPING_CONFLICT;
+             }
+         /** @todo AMD64: add check in PAE structures too, so we can remove all the 32-Bit paging stuff there. */
+     }
  
      /*

@@ -158 +158 @@ (PGMR3MapPT)
       */
      Log4(("PGMR3MapPT: GCPtr=%RGv cPTs=%u pbPTs=%p\n", GCPtr, cPTs, pbPTs));
-     for (i = 0; i < cPTs; i++)
+     for (unsigned i = 0; i < cPTs; i++)
      {
          /*

@@ -183 +183 @@
               i, pNew->aPTs[i].paPaePTsR3, pNew->aPTs[i].paPaePTsRC, pNew->aPTs[i].paPaePTsR0, pNew->aPTs[i].HCPhysPaePT0, pNew->aPTs[i].HCPhysPaePT1));
      }
-     pgmR3MapSetPDEs(pVM, pNew, iPageDir);
+     if (pVM->pgm.s.fFinalizedMappings)
+         pgmR3MapSetPDEs(pVM, pNew, iPageDir);
+     /* else PGMR3FinalizeMappings() */
  
      /*

@@ -219 +221 @@ (PGMR3UnmapPT)
  {
      LogFlow(("PGMR3UnmapPT: GCPtr=%#x\n", GCPtr));
+     AssertReturn(pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
  
      /*

@@ -272 +275 @@
  
  /**
+  * Checks whether a range of PDEs in the intermediate
+  * memory context are unused.
+  *
+  * We're talking 32-bit PDEs here.
+  *
+  * @returns true/false.
+  * @param   pVM     Pointer to the shared VM structure.
+  * @param   iPD     The first PDE in the range.
+  * @param   cPTs    The number of PDEs in the range.
+  */
+ DECLINLINE(bool) pgmR3AreIntermediatePDEsUnused(PVM pVM, unsigned iPD, unsigned cPTs)
+ {
+     if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
+         return false;
+     while (cPTs > 1)
+     {
+         iPD++;
+         if (pVM->pgm.s.pInterPD->a[iPD].n.u1Present)
+             return false;
+         cPTs--;
+     }
+     return true;
+ }
+ 
+ 
+ /**
+  * Unlinks the mapping.
+  *
+  * The mapping *must* be in the list.
+  *
+  * @param   pVM         Pointer to the shared VM structure.
+  * @param   pMapping    The mapping to unlink.
+  */
+ static void pgmR3MapUnlink(PVM pVM, PPGMMAPPING pMapping)
+ {
+     PPGMMAPPING pAfterThis = pVM->pgm.s.pMappingsR3;
+     if (pAfterThis == pMapping)
+     {
+         /* head */
+         pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
+         pVM->pgm.s.pMappingsRC = pMapping->pNextRC;
+         pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
+     }
+     else
+     {
+         /* in the list */
+         while (pAfterThis->pNextR3 != pMapping)
+         {
+             pAfterThis = pAfterThis->pNextR3;
+             AssertReleaseReturnVoid(pAfterThis);
+         }
+ 
+         pAfterThis->pNextR3 = pMapping->pNextR3;
+         pAfterThis->pNextRC = pMapping->pNextRC;
+         pAfterThis->pNextR0 = pMapping->pNextR0;
+     }
+ }
+ 
+ 
+ /**
+  * Links the mapping.
+  *
+  * @param   pVM         Pointer to the shared VM structure.
+  * @param   pMapping    The mapping to linked.
+  */
+ static void pgmR3MapLink(PVM pVM, PPGMMAPPING pMapping)
+ {
+     /*
+      * Find the list location (it's sorted by GCPhys) and link it in.
+      */
+     if (    !pVM->pgm.s.pMappingsR3
+         ||  pVM->pgm.s.pMappingsR3->GCPtr > pMapping->GCPtr)
+     {
+         /* head */
+         pMapping->pNextR3 = pVM->pgm.s.pMappingsR3;
+         pMapping->pNextRC = pVM->pgm.s.pMappingsRC;
+         pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
+         pVM->pgm.s.pMappingsR3 = pMapping;
+         pVM->pgm.s.pMappingsRC = MMHyperR3ToRC(pVM, pMapping);
+         pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
+     }
+     else
+     {
+         /* in the list */
+         PPGMMAPPING pAfterThis  = pVM->pgm.s.pMappingsR3;
+         PPGMMAPPING pBeforeThis = pAfterThis->pNextR3;
+         while (pBeforeThis && pBeforeThis->GCPtr <= pMapping->GCPtr)
+         {
+             pAfterThis  = pBeforeThis;
+             pBeforeThis = pBeforeThis->pNextR3;
+         }
+ 
+         pMapping->pNextR3 = pAfterThis->pNextR3;
+         pMapping->pNextRC = pAfterThis->pNextRC;
+         pMapping->pNextR0 = pAfterThis->pNextR0;
+         pAfterThis->pNextR3 = pMapping;
+         pAfterThis->pNextRC = MMHyperR3ToRC(pVM, pMapping);
+         pAfterThis->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
+     }
+ }
+ 
+ 
+ /**
+  * Finalizes the intermediate context.
+  *
+  * This is called at the end of the ring-3 init and will construct the
+  * intermediate paging structures, relocating all the mappings in the process.
+  *
+  * @returns VBox status code.
+  * @param   pVM     Pointer to the shared VM structure.
+  * @thread  EMT(0)
+  */
+ VMMR3DECL(int) PGMR3FinalizeMappings(PVM pVM)
+ {
+     AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
+     pVM->pgm.s.fFinalizedMappings = true;
+ 
+     /*
+      * Loop until all mappings have been finalized.
+      */
+     /*unsigned iPDNext = UINT32_C(0xc0000000) >> X86_PD_SHIFT;*/ /* makes CSAM/PATM freak out booting linux. :-/ */
+ #if 0
+     unsigned iPDNext = MM_HYPER_AREA_ADDRESS >> X86_PD_SHIFT;
+ #else
+     unsigned iPDNext = 1 << X86_PD_SHIFT; /* no hint, map them from the top. */
+ #endif
+     PPGMMAPPING pCur;
+     do
+     {
+         pCur = pVM->pgm.s.pMappingsR3;
+         while (pCur)
+         {
+             if (!pCur->fFinalized)
+             {
+                 /*
+                  * Find a suitable location.
+                  */
+                 RTGCPTR const   GCPtrOld = pCur->GCPtr;
+                 const unsigned  cPTs     = pCur->cPTs;
+                 unsigned        iPDNew   = iPDNext;
+                 if (    iPDNew + cPTs >= X86_PG_ENTRIES /* exclude the last PD */
+                     ||  !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
+                     ||  !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
+                 {
+                     /* No luck, just scan down from 4GB-4MB, giving up at 4MB. */
+                     iPDNew = X86_PG_ENTRIES - cPTs - 1;
+                     while (     iPDNew > 0
+                            &&   (   !pgmR3AreIntermediatePDEsUnused(pVM, iPDNew, cPTs)
+                                  ||  !pCur->pfnRelocate(pVM, GCPtrOld, (RTGCPTR)iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
+                           )
+                         iPDNew--;
+                     AssertLogRelReturn(iPDNew != 0, VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
+                 }
+ 
+                 /*
+                  * Relocate it (something akin to pgmR3MapRelocate).
+                  */
+                 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
+ 
+                 /* unlink the mapping, update the entry and relink it. */
+                 pgmR3MapUnlink(pVM, pCur);
+ 
+                 RTGCPTR const GCPtrNew = (RTGCPTR)iPDNew << X86_PD_SHIFT;
+                 pCur->GCPtr      = GCPtrNew;
+                 pCur->GCPtrLast  = GCPtrNew + pCur->cb - 1;
+                 pCur->fFinalized = true;
+ 
+                 pgmR3MapLink(pVM, pCur);
+ 
+                 /* Finally work the callback. */
+                 pCur->pfnRelocate(pVM, GCPtrOld, GCPtrNew, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
+ 
+                 /*
+                  * The list order might have changed, start from the beginning again.
+                  */
+                 iPDNext = iPDNew + cPTs;
+                 break;
+             }
+ 
+             /* next */
+             pCur = pCur->pNextR3;
+         }
+     } while (pCur);
+ 
+     return VINF_SUCCESS;
+ }
+ 
+ 
+ /**
   * Gets the size of the current guest mappings if they were to be
   * put next to oneanother.

@@ -526 +718 @@ (PGMR3MapIntermediate)
      AssertMsg(cbPages <= (512 << PAGE_SHIFT), ("The mapping is too big %d bytes\n", cbPages));
      AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages));
+     AssertReturn(!pVM->pgm.s.fFinalizedMappings, VERR_WRONG_ORDER);
  
      /*
       * Check for internal conflicts between the virtual address and the physical address.
+      * A 1:1 mapping is fine, but partial overlapping is a no-no.
       */
      if (    uAddress != HCPhys
  ...
         )
          AssertLogRelMsgFailedReturn(("Addr=%RTptr HCPhys=%RHp cbPages=%d\n", Addr, HCPhys, cbPages),
-                                     VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
- 
-     /* The intermediate mapping must not conflict with our default hypervisor address. */
-     size_t cbHyper;
-     RTGCPTR pvHyperGC = MMHyperGetArea(pVM, &cbHyper);
-     if (uAddress < pvHyperGC
-             ? uAddress + cbPages > pvHyperGC
-             : pvHyperGC + cbHyper > uAddress
-        )
-         AssertLogRelMsgFailedReturn(("Addr=%RTptr HyperGC=%RGv cbPages=%zu\n", Addr, pvHyperGC, cbPages),
                                      VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
  
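The core of PGMR3FinalizeMappings is the search for a free spot in the intermediate page directory: take the hint if it fits, otherwise scan down from just below the last PDE towards entry 1 and give up at 4MB. Below is a stripped-down model of that search, using plain C with a bool array standing in for the intermediate page directory and omitting the pfnRelocate veto that the real code also applies; the names are invented for the example (X86_PD_SHIFT is 22 and X86_PG_ENTRIES is 1024 for 32-bit paging).

#include <stdbool.h>
#include <stdio.h>

#define PG_ENTRIES 1024u    /* X86_PG_ENTRIES: PDEs in a 32-bit page directory */
#define PD_SHIFT   22       /* X86_PD_SHIFT: each PDE covers 4MB */

/* Simplified stand-in for pgmR3AreIntermediatePDEsUnused(). */
static bool rangeIsFree(const bool *pafPresent, unsigned iPD, unsigned cPTs)
{
    while (cPTs-- > 0)
        if (pafPresent[iPD++])
            return false;
    return true;
}

/* Models the placement logic: take the hint if it fits, otherwise scan down
 * from PG_ENTRIES - cPTs - 1 (just below 4GB) towards entry 1 (4MB).
 * Returns 0 on failure, mirroring the iPDNew != 0 assertion in the real code. */
static unsigned findSlot(const bool *pafPresent, unsigned iHint, unsigned cPTs)
{
    if (iHint + cPTs < PG_ENTRIES && rangeIsFree(pafPresent, iHint, cPTs))
        return iHint;
    for (unsigned iPD = PG_ENTRIES - cPTs - 1; iPD > 0; iPD--)
        if (rangeIsFree(pafPresent, iPD, cPTs))
            return iPD;
    return 0;
}

int main(void)
{
    bool afPresent[PG_ENTRIES] = { false };
    afPresent[1022] = true;                               /* pretend one PDE near the top is taken */
    unsigned iPD = findSlot(afPresent, PG_ENTRIES, 2);    /* 8MB mapping, no usable hint */
    printf("mapping placed at PDE %u => %#x\n", iPD, iPD << PD_SHIFT);
    return 0;
}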
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
r16172 → r16182, @@ -499 +499 @@

      GEN_CHECK_OFF(PGM, pMappingsRC);
      GEN_CHECK_OFF(PGM, pMappingsR0);
+     GEN_CHECK_OFF(PGM, fFinalizedMappings);
      GEN_CHECK_OFF(PGM, fMappingsFixed);
      GEN_CHECK_OFF(PGM, GCPtrMappingFixed);