Changeset 31090 in vbox
- Timestamp:
  Jul 26, 2010, 6:51:04 AM (15 years ago)
- File:
  1 edited
Legend:
- Unmodified (context, prefixed " ")
- Added (prefixed "+")
- Removed (prefixed "-")
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r31089 -> r31090)
@@ -73 +73 @@
 #endif
 
-/* enables the new code. */
-#define PGM_WITH_GST_WALK
-
 #ifndef IN_RING3
 
-#ifdef PGM_WITH_GST_WALK
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
 /**
@@ -129 +125 @@
 }
 # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-#endif /* PGM_WITH_GST_WALK */
 
 
@@ -157 +152 @@
     && PGM_SHW_TYPE != PGM_TYPE_NESTED \
     && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
-#ifdef PGM_WITH_GST_WALK
     int rc;
 
@@ -241 +235 @@
 # endif /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
 
-#else /* !PGM_WITH_GST_WALK */
-
-# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE
-    /*
-     * Hide the instruction fetch trap indicator if NX isn't active.
-     */
-    /** @todo do this only when returning with a guest trap! */
-    if ((uErr & X86_TRAP_PF_ID) && !pVCpu->pgm.s.fNoExecuteEnabled)
-    {
-        uErr &= ~X86_TRAP_PF_ID;
-        TRPMSetErrorCode(pVCpu, uErr);
-    }
-# endif
-
-    /*
-     * Get PDs.
-     */
-    int rc;
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-#  if PGM_GST_TYPE == PGM_TYPE_32BIT
-    const unsigned  iPDSrc = pvFault >> GST_PD_SHIFT;
-    PGSTPD          pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
-
-#  elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
-
-#   if PGM_GST_TYPE == PGM_TYPE_PAE
-    unsigned        iPDSrc = 0;             /* initialized to shut up gcc */
-    X86PDPE         PdpeSrc;
-    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(pVCpu, pvFault, &iPDSrc, &PdpeSrc);
-
-#   elif PGM_GST_TYPE == PGM_TYPE_AMD64
-    unsigned        iPDSrc = 0;             /* initialized to shut up gcc */
-    PX86PML4E       pPml4eSrc = NULL;       /* ditto */
-    X86PDPE         PdpeSrc;
-    PGSTPD          pPDSrc;
-
-    pPDSrc = pgmGstGetLongModePDPtr(pVCpu, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
-    Assert(pPml4eSrc);
-#   endif
-
-    /* Quick check for a valid guest trap. (PAE & AMD64) */
-    if (!pPDSrc)
-    {
-#   if PGM_GST_TYPE == PGM_TYPE_AMD64 && GC_ARCH_BITS == 64
-        LogFlow(("Trap0eHandler: guest PML4 %d not present CR3=%RGp\n", (int)((pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK), CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK));
-#   else
-        LogFlow(("Trap0eHandler: guest iPDSrc=%u not present CR3=%RGp\n", iPDSrc, CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK));
-#   endif
-        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
-        TRPMSetErrorCode(pVCpu, uErr);
-        return VINF_EM_RAW_GUEST_TRAP;
-    }
-#  endif
-
-# else /* !PGM_WITH_PAGING */
-    PGSTPD          pPDSrc = NULL;
-    const unsigned  iPDSrc = 0;
-# endif /* !PGM_WITH_PAGING */
-
-# if !defined(PGM_WITHOUT_MAPPINGS) && ((PGM_GST_TYPE == PGM_TYPE_32BIT) || (PGM_GST_TYPE == PGM_TYPE_PAE))
-    /*
-     * Check for write conflicts with our hypervisor mapping early on. If the guest happens to access a non-present page,
-     * where our hypervisor is currently mapped, then we'll create a #PF storm in the guest.
-     */
-    if (    (uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RW)) == (X86_TRAP_PF_P | X86_TRAP_PF_RW)
-        &&  MMHyperIsInsideArea(pVM, pvFault))
-    {
-        /* Force a CR3 sync to check for conflicts and emulate the instruction. */
-        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
-        return VINF_EM_RAW_EMULATE_INSTR;
-    }
-# endif
-
-    /*
-     * First check for a genuine guest page fault.
-     */
-    /** @todo This duplicates the page table walk we're doing below. Need to
-     *        find some way to avoid this double work, probably by caching
-     *        the data. */
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-    rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDSrc->a[iPDSrc], pvFault);
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-    if (rc == VINF_EM_RAW_GUEST_TRAP)
-    {
-        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
-        return rc;
-    }
-# endif /* PGM_WITH_PAGING */
-#endif /* !PGM_WITH_GST_WALK */
-
     /* Take the big lock now. */
     *pfLockTaken = true;
@@ -339 +242 @@
      * Fetch the guest PDE, PDPE and PML4E.
      */
-#ifndef PGM_WITH_GST_WALK
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
-# else
-    GSTPDE PdeSrc;
-    PdeSrc.u            = 0;    /* faked so we don't have to #ifdef everything */
-    PdeSrc.n.u1Present  = 1;
-    PdeSrc.n.u1Write    = 1;
-    PdeSrc.n.u1Accessed = 1;
-    PdeSrc.n.u1User     = 1;
-# endif
-
-#endif /* !PGM_WITH_GST_WALK */
 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
     const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
@@ -360 +250 @@
 
     PX86PDPAE       pPDDst;
-#ifdef PGM_WITH_GST_WALK
 # if PGM_GST_TYPE == PGM_TYPE_PAE
     rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, GstWalk.Pdpe.u, &pPDDst);
@@ -366 +255 @@
     rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, X86_PDPE_P, &pPDDst); /* RW, US and A are reserved in PAE mode. */
 # endif
-#else
-# if PGM_GST_TYPE != PGM_TYPE_PAE
-    X86PDPE         PdpeSrc;
-
-    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
-    PdpeSrc.u = X86_PDPE_P;     /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
-# endif
-    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, PdpeSrc.u, &pPDDst);
-#endif
     if (rc != VINF_SUCCESS)
     {
@@ -385 +265 @@
     const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
     PX86PDPAE       pPDDst;
-#ifdef PGM_WITH_GST_WALK
 # if PGM_GST_TYPE == PGM_TYPE_PROT  /* (AMD-V nested paging) */
     rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A,
-                                 X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US| X86_PDPE_A, &pPDDst);
+                                 X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A, &pPDDst);
 # else
     rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, GstWalk.Pml4e.u, GstWalk.Pdpe.u, &pPDDst);
 # endif
-#else
-# if PGM_GST_TYPE == PGM_TYPE_PROT
-    /* AMD-V nested paging */
-    X86PML4E        Pml4eSrc;
-    X86PDPE         PdpeSrc;
-    PX86PML4E       pPml4eSrc = &Pml4eSrc;
-
-    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
-    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
-    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
-# endif
-
-    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc->u, PdpeSrc.u, &pPDDst);
-#endif /* !PGM_WITH_GST_WALK */
     if (rc != VINF_SUCCESS)
     {
@@ -435 +300 @@
          */
         STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
-#ifdef PGM_WITH_GST_WALK
         rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], GstWalk.pPde, pvFault);
-#else
-        rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
-#endif
         STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
         if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
@@ -448 +309 @@
             return VINF_SUCCESS;
         }
-#ifdef PGM_WITH_GST_WALK
         AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
         AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
-#endif
     }
 
@@ -471 +330 @@
      *
      */
-#ifdef PGM_WITH_GST_WALK
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     Assert(GstWalk.Pde.n.u1Present);
 # endif
-#else
-    Assert(PdeSrc.n.u1Present);
-#endif
     if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
         &&  !pPDDst->a[iPDDst].n.u1Present
@@ -484 +339 @@
         STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
         STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-#ifdef PGM_WITH_GST_WALK
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
         LogFlow(("=>SyncPT %04x = %08RX64\n", (pvFault >> GST_PD_SHIFT) & GST_PD_MASK, (uint64_t)GstWalk.Pde.u));
@@ -492 +346 @@
         rc = PGM_BTH_NAME(SyncPT)(pVCpu, 0, NULL, pvFault);
 # endif
-#else /* !PGM_WITH_GST_WALK */
-        LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
-        rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
-#endif /* !PGM_WITH_GST_WALK */
         if (RT_SUCCESS(rc))
         {
@@ -501 +351 @@
             return rc;
         }
-#ifdef PGM_WITH_GST_WALK
         Log(("SyncPT: %RGv failed!! rc=%Rrc\n", pvFault, rc));
-#else
-        Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
-#endif
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
         STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
@@ -536 +382 @@
                 unsigned iPT = pMapping->cb >> GST_PD_SHIFT;
                 while (iPT-- > 0)
-#ifdef PGM_WITH_GST_WALK
                     if (GstWalk.pPde[iPT].n.u1Present)
-#else
-                    if (pPDSrc->a[iPDSrc + iPT].n.u1Present)
-#endif
                     {
                         STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eConflicts);
@@ -595 +437 @@
      * in page tables which the guest believes to be present.
      */
-#ifdef PGM_WITH_GST_WALK
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     Assert(GstWalk.Pde.n.u1Present);
 # endif
-#else
-    Assert(PdeSrc.n.u1Present);
-#endif
-    {
-#ifdef PGM_WITH_GST_WALK
+    {
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
         RTGCPHYS GCPhys = GstWalk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
-#if 1
-        RTGCPHYS GCPhys3;
-        if (   GstWalk.Pde.b.u1Size && GST_IS_PSE_ACTIVE(pVCpu))
-            GCPhys3 = GST_GET_PDE_BIG_PG_GCPHYS(pVM, GstWalk.Pde)
-                    | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
-        else
-            GCPhys3 = GstWalk.Pte.u & GST_PTE_PG_MASK;
-        Assert(GCPhys3 == GCPhys);
-#endif
 # else
         RTGCPHYS GCPhys = (RTGCPHYS)pvFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
 # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-#else
-        RTGCPHYS GCPhys = NIL_RTGCPHYS;
-
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-        if (    PdeSrc.b.u1Size
-            &&  GST_IS_PSE_ACTIVE(pVCpu))
-            GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc)
-                    | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
-        else
-        {
-            PGSTPT pPTSrc;
-            rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
-            if (RT_SUCCESS(rc))
-            {
-                unsigned iPTESrc = (pvFault >> GST_PT_SHIFT) & GST_PT_MASK;
-                if (pPTSrc->a[iPTESrc].n.u1Present)
-                    GCPhys = pPTSrc->a[iPTESrc].u & GST_PTE_PG_MASK;
-            }
-        }
-# else
-        /* No paging so the fault address is the physical address */
-        GCPhys = (RTGCPHYS)(pvFault & ~PAGE_OFFSET_MASK);
-# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-#endif /* !PGM_WITH_GST_WALK */
 
         /*
          * If we have a GC address we'll check if it has any flags set.
          */
-#ifndef PGM_WITH_GST_WALK
-        if (GCPhys != NIL_RTGCPHYS)
-#endif
         {
             STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
@@ -678 +479 @@
                     &&  !(uErr & X86_TRAP_PF_P))
                 {
-#ifdef PGM_WITH_GST_WALK
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                     rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
@@ -684 +484 @@
                     rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
 # endif
-#else
-                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#endif
                     if (    RT_FAILURE(rc)
                         ||  !(uErr & X86_TRAP_PF_RW)
@@ -754 +551 @@
                     &&  !(uErr & X86_TRAP_PF_P))
                 {
-#ifdef PGM_WITH_GST_WALK
                     rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#else
-                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#endif
                     if (    RT_FAILURE(rc)
                         ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
@@ -855 +648 @@
             &&  !(uErr & X86_TRAP_PF_P))
         {
-#ifdef PGM_WITH_GST_WALK
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
             rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
@@ -861 +653 @@
             rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
 # endif
-#else
-            rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#endif
             if (    RT_FAILURE(rc)
                 ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
@@ -977 +766 @@
                 uint64_t fPageGst2;
                 PGMGstGetPage(pVCpu, pvFault, &fPageGst2, &GCPhys2);
-#ifdef PGM_WITH_GST_WALK
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                 Log(("Page out of sync: %RGv eip=%08x PdeSrc.US=%d fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
                      pvFault, pRegFrame->eip, GstWalk.Pde.n.u1User, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
-# else
+#  else
                 Log(("Page out of sync: %RGv eip=%08x fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
                      pvFault, pRegFrame->eip, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
-# endif
-#else
-                Log(("Page out of sync: %RGv eip=%08x PdeSrc.n.u1User=%d fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
-                     pvFault, pRegFrame->eip, PdeSrc.n.u1User, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
-#endif
+#  endif
 # endif /* LOG_ENABLED */
@@ -1019 +803 @@
              */
             LogFlow(("CSAM ring 3 job\n"));
-#ifdef PGM_WITH_GST_WALK
             int rc2 = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
-#else
-            int rc2 = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, 1, uErr);
-#endif
             AssertRC(rc2);
@@ -1072 +852 @@
         }
 # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
-#ifdef PGM_WITH_GST_WALK
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
         rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
@@ -1078 +857 @@
         rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
 # endif
-#else
-        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#endif
         if (RT_SUCCESS(rc))
         {
@@ -1150 +926 @@
          * page is not present, which is not true in this case.
          */
-#ifdef PGM_WITH_GST_WALK
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
         rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
@@ -1156 +931 @@
         rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, 1, uErr);
 # endif
-#else
-        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, 1, uErr);
-#endif
         if (RT_SUCCESS(rc))
         {
@@ -1176 +948 @@
             {
                 rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, &GCPhys2);
-#if defined(PGM_WITH_GST_WALK) && PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-                AssertMsg(RT_SUCCESS(rc) && (fPageGst & X86_PTE_RW), ("rc=%Rrc fPageGst=%RX64 EffRW=%d EffUS=%d uErr=%RGp cr4=%RX64 pvFault=%RGv\n", rc, fPageGst, GstWalk.Core.fEffectiveRW, GstWalk.Core.fEffectiveUS, uErr, CPUMGetGuestCR0(pVCpu), pvFault ));
-#else
                 AssertMsg(RT_SUCCESS(rc) && (fPageGst & X86_PTE_RW), ("rc=%Rrc fPageGst=%RX64\n", rc, fPageGst));
-#endif
                 LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys2, (uint64_t)fPageGst));
             }
@@ -1227 +995 @@
 # endif /* PGM_OUT_OF_SYNC_IN_GC */
         }
-#ifndef PGM_WITH_GST_WALK
-        else /* GCPhys == NIL_RTGCPHYS */
-        {
-            /*
-             * Page not present in Guest OS or invalid page table address.
-             * This is potential virtual page access handler food.
-             *
-             * For the present we'll say that our access handlers don't
-             * work for this case - we've already discarded the page table
-             * not present case which is identical to this.
-             *
-             * When we perchance find we need this, we will probably have AVL
-             * trees (offset based) to operate on and we can measure their speed
-             * agains mapping a page table and probably rearrange this handling
-             * a bit. (Like, searching virtual ranges before checking the
-             * physical address.)
-             */
-        }
     }
     /** @todo This point is never really reached. Clean up later! */
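Every hunk above applies the same mechanical change: r31089 landed the new guest-page-table-walk code behind a temporary PGM_WITH_GST_WALK compile-time switch ("enables the new code"), and this changeset deletes the switch, keeping the new path and removing the old #else branches wholesale. A minimal sketch of that pattern, using hypothetical newWalk()/oldWalk() stand-ins rather than the real PGM helpers:

#include <stdio.h>

/* Hypothetical stand-ins for the two trap-handler paths; the real code uses
 * the unified GstWalk state vs. the per-mode iPDSrc/pPDSrc/PdpeSrc lookups. */
static int newWalk(void) { puts("unified guest page-table walk"); return 0; }
static int oldWalk(void) { puts("per-mode PD/PDPT/PML4 lookup");  return 0; }

/* Before (r31089): both paths compiled in, selected by the switch. */
#define PGM_WITH_GST_WALK   /* "enables the new code." */

static int handleFaultBefore(void)
{
#ifdef PGM_WITH_GST_WALK
    return newWalk();
#else
    return oldWalk();
#endif
}

/* After (r31090): the #define and every #else branch are deleted, so the
 * new path is unconditional and the old one disappears from the source. */
static int handleFaultAfter(void)
{
    return newWalk();
}

int main(void)
{
    (void)oldWalk;  /* referenced only when PGM_WITH_GST_WALK is undefined */
    return handleFaultBefore() | handleFaultAfter();
}

Gating a risky rewrite behind a #define like this lets the old path remain available as a fallback until the new one is trusted, after which a cleanup commit such as this one removes the dead branches in a single pass.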