Changeset 18724 in vbox
- Timestamp: Apr 5, 2009 5:36:54 PM
- svn:sync-xref-src-repo-rev: 45682
- Location: trunk/src/VBox/VMM
- Files: 2 edited
--- trunk/src/VBox/VMM/PGMInternal.h (r18665)
+++ trunk/src/VBox/VMM/PGMInternal.h (r18724)
@@ -1610,2 +1610,4 @@
     bool        fLocked;
 } PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
+/** Pointer to a const pool page. */
+typedef PGMPOOLPAGE const *PCPGMPOOLPAGE;
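The new typedef follows the usual VirtualBox P*/PC* naming pattern: PPGMPOOLPAGE points at a mutable pool page, PCPGMPOOLPAGE at a const one, so read-only code paths (such as the VBOX_STRICT checking code added in PGMAllMap.cpp below) cannot accidentally modify the page. A minimal standalone sketch of the pattern, using a hypothetical EXAMPLEPAGE structure as a stand-in for PGMPOOLPAGE:

#include <stdbool.h>

/* Hypothetical stand-in for PGMPOOLPAGE; only the typedef pattern matters. */
typedef struct EXAMPLEPAGE
{
    bool fLocked;
} EXAMPLEPAGE;
typedef EXAMPLEPAGE       *PEXAMPLEPAGE;    /* read/write access */
typedef EXAMPLEPAGE const *PCEXAMPLEPAGE;   /* read-only access  */

/* Checking code takes the const variant; the compiler enforces it. */
static bool exampleIsLocked(PCEXAMPLEPAGE pPage)
{
    /* pPage->fLocked = true;  -- would not compile: the pointee is const */
    return pPage->fLocked;
}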
--- trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp (r18291)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp (r18724)
@@ -231,5 +231,5 @@
 
     /*
-     * Init the page tables and insert them into the page directories.
+     * Insert the page tables into the shadow page directories.
      */
     unsigned i = pMap->cPTs;
@@ -239,5 +239,5 @@
         iNewPDE--;
 
-        switch(enmShadowMode)
+        switch (enmShadowMode)
         {
             case PGMMODE_32_BIT:
@@ -248,14 +248,14 @@
                 PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
 #endif
+                /* Free any previous user, unless it's us. */
+                Assert(   (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
+                       || (pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPT);
                 if (    pShw32BitPd->a[iNewPDE].n.u1Present
                     && !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
-                {
                     pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
-                }
 
-                X86PDE Pde;
-                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
-                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
-                pShw32BitPd->a[iNewPDE] = Pde;
+                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
+                pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
+                                          | (uint32_t)pMap->aPTs[i].HCPhysPT;
 #ifdef IN_RC
                 /* Unlock dynamic mappings again. */
@@ -268,6 +268,6 @@
             case PGMMODE_PAE_NX:
             {
-                const unsigned iPdPt = iNewPDE / 256;
-                unsigned iPDE = iNewPDE * 2 % 512;
+                const uint32_t iPdPt = iNewPDE / 256;
+                unsigned iPaePde = iNewPDE * 2 % 512;
                 PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                 Assert(pShwPdpt);
@@ -275,25 +275,27 @@
                 PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
 #endif
-                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
+
+                /*
+                 * Get the shadow PD.
+                 * If no PD, sync it (PAE guest) or fake (not present or 32-bit guest).
+                 * Note! The RW, US and A bits are reserved for PAE PDPTEs. Setting the
+                 *       accessed bit causes invalid VT-x guest state errors.
+                 */
+                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, iPdPt << X86_PDPT_SHIFT);
                 if (!pShwPaePd)
                 {
-                    X86PDPE GstPdpe;
-
+                    X86PDPE     GstPdpe;
                     if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
-                    {
-                        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
-                        GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
-                    }
+                        GstPdpe.u = X86_PDPE_P;
                     else
                     {
-                        PX86PDPE pGstPdpe;
-                        pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
+                        PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, iPdPt << X86_PDPT_SHIFT);
                         if (pGstPdpe)
                             GstPdpe = *pGstPdpe;
                         else
-                            GstPdpe.u = X86_PDPE_P;  /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
+                            GstPdpe.u = X86_PDPE_P;
                     }
-                    int rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
-                    AssertFatal(RT_SUCCESS(rc));
+                    int rc = pgmShwSyncPaePDPtr(pVM, iPdPt << X86_PDPT_SHIFT, &GstPdpe, &pShwPaePd);
+                    AssertFatalRC(rc);
                 }
                 Assert(pShwPaePd);
@@ -301,47 +303,50 @@
                 PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
 #endif
-                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
+
+                /*
+                 * Mark the page as locked; disallow flushing.
+                 */
+                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                 AssertFatal(pPoolPagePd);
-
                 if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
-                {
-                    /* Mark the page as locked; disallow flushing. */
                     pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
-                }
 #ifdef VBOX_STRICT
-                else
-                if (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING)
+                else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING)
                 {
                     Assert(PGMGetGuestMode(pVM) >= PGMMODE_PAE);
-                    AssertFatalMsg((pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
-                    Assert(pShwPaePd->a[iPDE+1].u & PGM_PDFLAGS_MAPPING);
-                    AssertFatalMsg((pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
-                }
-#endif
-                if ( pShwPaePd->a[iPDE].n.u1Present
-                    && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
-                {
-                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
-                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
-                }
-
-                X86PDEPAE PdePae0;
-                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
-                pShwPaePd->a[iPDE] = PdePae0;
-
-                /* 2nd 2 MB PDE of the 4 MB region */
-                iPDE++;
-                AssertFatal(iPDE < 512);
-
-                if ( pShwPaePd->a[iPDE].n.u1Present
-                    && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
-                {
-                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
-                }
-                X86PDEPAE PdePae1;
-                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
-                pShwPaePd->a[iPDE] = PdePae1;
-
-                /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
+                    AssertFatalMsg((pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0, ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
+                    Assert(pShwPaePd->a[iPaePde+1].u & PGM_PDFLAGS_MAPPING);
+                    AssertFatalMsg((pShwPaePd->a[iPaePde+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1, ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
+                }
+#endif
+
+                /*
+                 * Insert our first PT, freeing anything we might be replacing unless it's a mapping (i.e. us).
+                 */
+                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
+                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0);
+                if (    pShwPaePd->a[iPaePde].n.u1Present
+                    && !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
+                {
+                    Assert(!(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
+                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
+                }
+                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
+                                        | pMap->aPTs[i].HCPhysPaePT0;
+
+                /* 2nd 2 MB PDE of the 4 MB region, same as above. */
+                iPaePde++;
+                AssertFatal(iPaePde < 512);
+                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
+                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1);
+                if (    pShwPaePd->a[iPaePde].n.u1Present
+                    && !(pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING))
+                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
+                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
+                                        | pMap->aPTs[i].HCPhysPaePT1;
+
+                /*
+                 * Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode)
+                 */
                 pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
 
@@ -384,10 +389,7 @@
 
     PX86PDPT pCurrentShwPdpt = NULL;
-
     if (    PGMGetGuestMode(pVM) >= PGMMODE_PAE
         &&  pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3))
-    {
         pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
-    }
 
     unsigned i = pMap->cPTs;
@@ -403,9 +405,9 @@
             case PGMMODE_32_BIT:
             {
-                PX86PD pShw32BitPd  = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
+                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                 AssertFatal(pShw32BitPd);
 
                 Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
-                pShw32BitPd->a[iOldPDE].u  = 0;
+                pShw32BitPd->a[iOldPDE].u = 0;
                 break;
             }
@@ -415,9 +417,11 @@
             {
                 const unsigned iPdpt = iOldPDE / 256;       /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
-                unsigned iPDE = iOldPDE * 2 % 512;
+                unsigned iPaePde = iOldPDE * 2 % 512;
                 PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                 PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));
 
-                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
+                /*
+                 * Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode)
+                 */
                 if (fDeactivateCR3)
                     pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
@@ -435,31 +439,38 @@
                 }
             }
-            if (pCurrentShwPdpt)
-            {
-                /* If the page directory of the old CR3 is reused in the new one, then don't clear the hypervisor mappings. */
-                if ((pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK))
-                {
-                    LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
-                    break;
-                }
-            }
+
+                /*
+                 * If the page directory of the old CR3 is reused in the new one, then don't
+                 * clear the hypervisor mappings.
+                 */
+                if (   pCurrentShwPdpt
+                    && (pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) )
+                {
+                    LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
+                    break;
+                }
+
+                /*
+                 * Clear the mappings in the PD.
+                 */
                 AssertFatal(pShwPaePd);
-
-                Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
-                pShwPaePd->a[iPDE].u = 0;
-
-                iPDE++;
-                AssertFatal(iPDE < 512);
-
-                Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
-                pShwPaePd->a[iPDE].u = 0;
-
-                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
-                AssertFatal(pPoolPagePd);
-
-                if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
-                {
-                    /* Mark the page as unlocked; allow flushing again. */
-                    pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
+                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
+                pShwPaePd->a[iPaePde].u = 0;
+
+                iPaePde++;
+                AssertFatal(iPaePde < 512);
+                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
+                pShwPaePd->a[iPaePde].u = 0;
+
+                /*
+                 * Unlock the shadow pool PD page if the PDPTE no longer holds any mappings.
+                 */
+                if (   fDeactivateCR3
+                    || !(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
+                {
+                    PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
+                    AssertFatal(pPoolPagePd);
+                    if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
+                        pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                 }
                 break;
@@ -499,4 +510,4 @@
             case PGMMODE_32_BIT:
             {
-                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
+                PCX86PD pShw32BitPd = (PCX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                 AssertFatal(pShw32BitPd);
@@ -512,14 +523,14 @@
             case PGMMODE_PAE_NX:
            {
-                const unsigned iPD = iPDE / 256;        /* iPDE * 2 / 512; iPDE is in 4 MB pages */
+                const unsigned iPdpt = iPDE / 256;      /* iPDE * 2 / 512; iPDE is in 4 MB pages */
                 unsigned iPaePDE = iPDE * 2 % 512;
-                PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
-                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
+                PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
+                PCX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, iPdpt << X86_PDPT_SHIFT);
                 AssertFatal(pShwPaePd);
 
                 AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
-                          ("Expected %RX64 vs %RX64; iPDE=%#x iPD=%#x iPaePDE=%#x %RGv %s\n",
-                           pShwPaePd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
-                           iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
+                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
+                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
+                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
 
                 iPaePDE++;
@@ -527,12 +538,16 @@
 
                 AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
-                          ("Expected %RX64 vs %RX64; iPDE=%#x iPD=%#x iPaePDE=%#x %RGv %s\n",
-                           pShwPaePd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
-                           iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
-
-                AssertMsg(pPdpt->a[iPD].u & PGM_PLXFLAGS_MAPPING,
-                          ("%RX64; iPD=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
-                           pPdpt->a[iPD].u,
-                           iPDE, iPD, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
+                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
+                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
+                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
+
+                AssertMsg(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING,
+                          ("%RX64; iPdpt=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
+                           pShwPdpt->a[iPdpt].u,
+                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc) ));
+
+                PCPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
+                AssertFatal(pPoolPagePd);
+                AssertMsg(pPoolPagePd->fLocked, (".idx=%d .type=%d\n", pPoolPagePd->idx, pPoolPagePd->enmKind));
                 break;
             }
@@ -567,5 +582,4 @@
     {
         unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
-
        pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
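Throughout the change, a 32-bit guest page directory index (iNewPDE/iOldPDE, one entry per 4 MB) is translated to PAE structures as iPdPt = iPDE / 256 and iPaePde = iPDE * 2 % 512: each PDPT entry covers 1 GB (256 of the 4 MB entries), and each 4 MB region takes two consecutive 2 MB PAE PDEs. A standalone sketch (not VirtualBox code) that cross-checks this arithmetic against the virtual address each entry maps:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (uint32_t iPDE = 0; iPDE < 1024; iPDE++)          /* 32-bit PD: 1024 entries of 4 MB */
    {
        uint32_t iPdPt   = iPDE / 256;                    /* one PDPT entry per 1 GB = 256 * 4 MB */
        uint32_t iPaePde = iPDE * 2 % 512;                /* two 2 MB PAE PDEs per 4 MB region    */

        uint64_t GCPtr = (uint64_t)iPDE << 22;            /* address mapped by this 4 MB PDE      */
        assert(iPdPt   == (uint32_t)(GCPtr >> 30));       /* PDPT index: address bits 31:30       */
        assert(iPaePde == (uint32_t)((GCPtr >> 21) & 511)); /* PAE PD index: address bits 29:21   */
    }
    printf("4 MB -> PAE index mapping verified for all 1024 PDEs\n");
    return 0;
}

The second PDE (iPaePde + 1) covers the upper 2 MB half of the region, which is why the code above inserts HCPhysPaePT0 and HCPhysPaePT1 back to back and asserts iPaePde < 512 after the increment.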