Changeset 16321 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jan 28, 2009 4:36:24 PM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r16317 → r16321

@@ -211,5 +211,5 @@
 # include "PGMAllShw.h"
 
-/* Guest - protected mode */
+/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
 # define PGM_GST_TYPE               PGM_TYPE_PROT
 # define PGM_GST_NAME(name)         PGM_GST_NAME_PROT(name)

@@ -1418,9 +1418,5 @@
 VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
 {
-#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     return pVM->pgm.s.HCPhysShwCR3;
-#else
-    return pVM->pgm.s.HCPhysShwCR3;
-#endif
 }
 
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r16317 → r16321

@@ -4418,4 +4418,5 @@
 PGM_BTH_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
 {
+    /* Update guest paging info. */
 #if PGM_GST_TYPE == PGM_TYPE_32BIT \
     || PGM_GST_TYPE == PGM_TYPE_PAE \

@@ -4553,4 +4554,60 @@
     int rc = VINF_SUCCESS;
 #endif
+
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    /* Update shadow paging info. */
+# if PGM_SHW_TYPE == PGM_TYPE_32BITS \
+    || PGM_SHW_TYPE == PGM_TYPE_PAE \
+    || PGM_SHW_TYPE == PGM_TYPE_AMD64
+
+    if (!HWACCMIsNestedPagingActive(pVM))
+    {
+        /* Apply all hypervisor mappings to the new CR3. */
+        PGMMapActivateAll(pVM);
+
+        /*
+         * Update the shadow root page as well since that's not fixed.
+         */
+        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+        if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
+        {
+            /* It might have been freed already by a pool flush (see e.g. PGMR3MappingsUnfix). */
+            /** @todo Coordinate this better with the pool. */
+            if (pVM->pgm.s.CTX_SUFF(pShwPageCR3)->enmKind != PGMPOOLKIND_FREE)
+                pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), SHW_POOL_ROOT_IDX, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
+            pVM->pgm.s.pShwPageCR3R3 = 0;
+            pVM->pgm.s.pShwPageCR3R0 = 0;
+            pVM->pgm.s.pShwRootR3 = 0;
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+            pVM->pgm.s.pShwRootR0 = 0;
+# endif
+            pVM->pgm.s.HCPhysShwCR3 = 0;
+        }
+
+        Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
+        rc = pgmPoolAlloc(pVM, GCPhysCR3, BTH_PGMPOOLKIND_ROOT, SHW_POOL_ROOT_IDX, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.CTX_SUFF(pShwPageCR3));
+        if (rc == VERR_PGM_POOL_FLUSHED)
+        {
+            Log(("MapCR3: PGM pool flushed -> signal sync cr3\n"));
+            Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
+            return VINF_PGM_SYNC_CR3;
+        }
+        AssertRCReturn(rc, rc);
+# ifdef IN_RING0
+        pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
+# else
+        pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
+# endif
+        pVM->pgm.s.pShwRootR3 = (R3PTRTYPE(void *))pVM->pgm.s.CTX_SUFF(pShwPageCR3)->pvPageR3;
+        Assert(pVM->pgm.s.pShwRootR3);
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+        pVM->pgm.s.pShwRootR0 = (R0PTRTYPE(void *))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwPageCR3));
+# endif
+        pVM->pgm.s.HCPhysShwCR3 = pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
+        rc = VINF_SUCCESS; /* clear it - pgmPoolAlloc returns hints. */
+    }
+# endif
+#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
+
     return rc;
 }

@@ -4568,4 +4625,5 @@
     int rc = VINF_SUCCESS;
 
+    /* Update guest paging info. */
 #if PGM_GST_TYPE == PGM_TYPE_32BIT
     pVM->pgm.s.pGst32BitPdR3 = 0;

@@ -4596,12 +4654,12 @@
     pVM->pgm.s.pGstAmd64Pml4R0 = 0;
 # endif
+# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     if (!HWACCMIsNestedPagingActive(pVM))
     {
         pVM->pgm.s.pShwRootR3 = 0;
-# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+#  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
         pVM->pgm.s.pShwRootR0 = 0;
-# endif
+#  endif
         pVM->pgm.s.HCPhysShwCR3 = 0;
-# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
         if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
         {

@@ -4611,10 +4669,39 @@
             pVM->pgm.s.pShwPageCR3R0 = 0;
         }
+    }
 # endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
-    }
 
 #else /* prot/real mode stub */
     /* nothing to do */
 #endif
+
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    /* Update shadow paging info. */
+# if PGM_SHW_TYPE == PGM_TYPE_32BITS \
+    || PGM_SHW_TYPE == PGM_TYPE_PAE \
+    || PGM_SHW_TYPE == PGM_TYPE_AMD64
+
+    if (!HWACCMIsNestedPagingActive(pVM))
+    {
+        /* @todo: dangerous as it's the current CR3! */
+        /* Remove the hypervisor mappings from the shadow page table. */
+        PGMMapDeactivateAll(pVM);
+
+        pVM->pgm.s.pShwRootR3 = 0;
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+        pVM->pgm.s.pShwRootR0 = 0;
+# endif
+        pVM->pgm.s.HCPhysShwCR3 = 0;
+        if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
+        {
+            PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+            pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), SHW_POOL_ROOT_IDX, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
+            pVM->pgm.s.pShwPageCR3R3 = 0;
+            pVM->pgm.s.pShwPageCR3R0 = 0;
+        }
+    }
+# endif
+#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
+
     return rc;
 }
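The new MapCR3 code asserts !(GCPhysCR3 >> (PAGE_SHIFT + 32)) before handing the address to pgmPoolAlloc: the page index GCPhysCR3 >> PAGE_SHIFT must fit in 32 bits, which with 4 KB pages bounds the guest CR3 physical address below 2^44. A minimal standalone sketch of that check; the helper name and the test values are illustrative, not from the changeset:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* 4 KB pages, as in VBox */

/* Hypothetical helper mirroring the Assert in the new MapCR3 code:
 * the page index (GCPhysCR3 >> PAGE_SHIFT) must fit in 32 bits. */
static int cr3PageIndexFits32Bits(uint64_t GCPhysCR3)
{
    return (GCPhysCR3 >> (PAGE_SHIFT + 32)) == 0;
}

int main(void)
{
    printf("%d\n", cr3PageIndexFits32Bits(UINT64_C(0x00000000fee00000))); /* 1: below 2^44 */
    printf("%d\n", cr3PageIndexFits32Bits(UINT64_C(0x0000100000000000))); /* 0: exactly 2^44 */
    return 0;
}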
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
r13232 → r16321

@@ -210,2 +210,205 @@
 
 
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+
+/**
+ * Sets all PDEs involved with the mapping in the shadow page table.
+ *
+ * @param   pVM         The VM handle.
+ * @param   pMap        Pointer to the mapping in question.
+ * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
+ */
+void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
+{
+    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
+        return;
+
+    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
+    Assert(enmShadowMode <= PGMMODE_PAE_NX);
+
+    /*
+     * Init the page tables and insert them into the page directories.
+     */
+    unsigned i = pMap->cPTs;
+    iNewPDE += i;
+    while (i-- > 0)
+    {
+        iNewPDE--;
+
+        switch(enmShadowMode)
+        {
+            case PGMMODE_32_BIT:
+            {
+                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
+                AssertFatal(pShw32BitPd);
+
+                if (pShw32BitPd->a[iNewPDE].n.u1Present)
+                {
+                    Assert(!(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
+                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
+                }
+
+                X86PDE Pde;
+                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
+                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
+                pShw32BitPd->a[iNewPDE] = Pde;
+                break;
+            }
+
+            case PGMMODE_PAE:
+            case PGMMODE_PAE_NX:
+            {
+                PX86PDPT  pShwPdpt;
+                PX86PDPAE pShwPaePd;
+                const unsigned iPdPt = iNewPDE / 256;
+                unsigned iPDE = iNewPDE * 2 % 512;
+
+                pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
+                Assert(pShwPdpt);
+                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
+                AssertFatal(pShwPaePd);
+
+                PPGMPOOLPAGE pPoolPagePde = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
+                AssertFatal(pPoolPagePde);
+
+                if (pShwPaePd->a[iPDE].n.u1Present)
+                {
+                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
+                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
+                }
+
+                X86PDEPAE PdePae0;
+                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
+                pShwPaePd->a[iPDE] = PdePae0;
+
+                /* 2nd 2 MB PDE of the 4 MB region */
+                iPDE++;
+                AssertFatal(iPDE < 512);
+
+                if (pShwPaePd->a[iPDE].n.u1Present)
+                {
+                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
+                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
+                }
+
+                X86PDEPAE PdePae1;
+                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
+                pShwPaePd->a[iPDE] = PdePae1;
+
+                /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
+                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
+            }
+        }
+    }
+}
+
+/**
+ * Clears all PDEs involved with the mapping in the shadow page table.
+ *
+ * @param   pVM         The VM handle.
+ * @param   pMap        Pointer to the mapping in question.
+ * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
+ */
+void pgmMapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
+{
+    unsigned i = pMap->cPTs;
+    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
+
+    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
+        return;
+
+    iOldPDE += i;
+    while (i-- > 0)
+    {
+        iOldPDE--;
+
+        switch(enmShadowMode)
+        {
+            case PGMMODE_32_BIT:
+            {
+                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
+                AssertFatal(pShw32BitPd);
+
+                pShw32BitPd->a[iOldPDE].u = 0;
+                break;
+            }
+
+            case PGMMODE_PAE:
+            case PGMMODE_PAE_NX:
+            {
+                PX86PDPT  pPdpt = NULL;
+                PX86PDPAE pShwPaePd = NULL;
+
+                const unsigned iPD = iOldPDE / 256;     /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
+                unsigned iPDE = iOldPDE * 2 % 512;
+                pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
+                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
+                AssertFatal(pShwPaePd);
+
+                pShwPaePd->a[iPDE].u = 0;
+
+                iPDE++;
+                AssertFatal(iPDE < 512);
+
+                pShwPaePd->a[iPDE].u = 0;
+                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
+                pPdpt->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
+                break;
+            }
+        }
+    }
+}
+
+/**
+ * Apply the hypervisor mappings to the active CR3.
+ *
+ * @returns VBox status.
+ * @param   pVM     The virtual machine.
+ */
+VMMDECL(int) PGMMapActivateAll(PVM pVM)
+{
+    /*
+     * Can skip this if mappings are safely fixed.
+     */
+    if (pVM->pgm.s.fMappingsFixed)
+        return VINF_SUCCESS;
+
+    /*
+     * Iterate mappings.
+     */
+    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
+    {
+        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
+
+        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
+    }
+
+    return VINF_SUCCESS;
+}
+
+/**
+ * Remove the hypervisor mappings from the active CR3
+ *
+ * @returns VBox status.
+ * @param   pVM     The virtual machine.
+ */
+VMMDECL(int) PGMMapDeactivateAll(PVM pVM)
+{
+    /*
+     * Can skip this if mappings are safely fixed.
+     */
+    if (pVM->pgm.s.fMappingsFixed)
+        return VINF_SUCCESS;
+
+    /*
+     * Iterate mappings.
+     */
+    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
+    {
+        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
+
+        pgmMapClearShadowPDEs(pVM, pCur, iPDE);
+    }
+    return VINF_SUCCESS;
+}
+#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
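The PAE cases above convert a 32-bit page-directory index into a PDPT index plus a pair of PAE PDE indices: a 32-bit PDE spans 4 MB, each of the four PAE page directories spans 1 GB (256 such 4 MB slots), and every 4 MB slot is backed by two 2 MB PAE PDEs (HCPhysPaePT0/HCPhysPaePT1). A small self-contained check of that arithmetic; the helper name and the sample index are illustrative, not part of the changeset:

#include <stdio.h>

/* Illustrative helper (not VBox code): map a 32-bit PD index to the PAE
 * PDPT index and the first of the two PAE PDE indices, exactly as the
 * iPdPt/iPDE computations in the new functions do. */
static void mapIndex32BitToPae(unsigned iPde32, unsigned *piPdpt, unsigned *piPde)
{
    *piPdpt = iPde32 / 256;     /* 256 x 4 MB = 1 GB per PAE page directory */
    *piPde  = iPde32 * 2 % 512; /* two 2 MB PAE PDEs per 4 MB region */
}

int main(void)
{
    /* 32-bit PDE index 0x281 corresponds to guest address 0xA0400000. */
    unsigned iPdpt, iPde;
    mapIndex32BitToPae(0x281, &iPdpt, &iPde);
    printf("PDPT %u, PAE PDEs %u and %u\n", iPdpt, iPde, iPde + 1); /* 2, 258 and 259 */
    return 0;
}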
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r16317 → r16321

@@ -110,5 +110,5 @@
 # define SHW_PDPE_PG_MASK        X86_PDPE_PG_MASK
 # define SHW_TOTAL_PD_ENTRIES    (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)
-# define SHW_POOL_ROOT_IDX       PGMPOOL_IDX_PAE_PD
+# define SHW_POOL_ROOT_IDX       PGMPOOL_IDX_PDPT
 
 # endif
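For context on the unchanged lines in this hunk: in PAE a page directory holds 512 entries and the PDPT holds 4, so SHW_TOTAL_PD_ENTRIES evaluates to 2048. A trivial standalone check; the macro values are the architectural PAE constants, defined locally here rather than taken from the VBox headers:

#include <stdio.h>

/* Local stand-ins for the VBox constants (PAE: 512 PDEs per PD, 4 PDPTEs). */
#define X86_PG_PAE_ENTRIES       512
#define X86_PG_PAE_PDPE_ENTRIES  4
#define SHW_TOTAL_PD_ENTRIES     (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)

int main(void)
{
    printf("%d\n", SHW_TOTAL_PD_ENTRIES); /* 2048 */
    return 0;
}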