Changeset 80177 in vbox
- Timestamp: Aug 7, 2019, 9:55:21 AM (6 years ago)
- svn:sync-xref-src-repo-rev: 132618
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r80007 r80177 264 264 for (unsigned i = 0; i < 2; i++) 265 265 { 266 # ifdef VBOX_WITH_RAW_MODE_NOT_R0267 if ((uShw.pPDPae->a[iShw + i].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))268 {269 Assert(pgmMapAreMappingsEnabled(pVM));270 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);271 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw=%#x!\n", iShwPdpt, iShw+i));272 break;273 }274 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */275 266 if (uShw.pPDPae->a[iShw+i].n.u1Present) 276 267 { … … 290 281 if (iShw2 < RT_ELEMENTS(uShw.pPDPae->a)) 291 282 { 292 # ifdef VBOX_WITH_RAW_MODE_NOT_R0293 if ((uShw.pPDPae->a[iShw2].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))294 {295 Assert(pgmMapAreMappingsEnabled(pVM));296 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);297 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw2=%#x!\n", iShwPdpt, iShw2));298 break;299 }300 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */301 283 if (uShw.pPDPae->a[iShw2].n.u1Present) 302 284 { … … 366 348 LogFlow(("pgmPoolMonitorChainChanging: PGMPOOLKIND_32BIT_PD %x\n", iShw)); 367 349 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD)); 368 # ifdef VBOX_WITH_RAW_MODE_NOT_R0 369 if (uShw.pPD->a[iShw].u & PGM_PDFLAGS_MAPPING) 350 if (uShw.pPD->a[iShw].n.u1Present) 370 351 { 371 Assert(pgmMapAreMappingsEnabled(pVM)); 372 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 373 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict)); 374 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw)); 375 break; 376 } 377 else 378 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ 379 { 380 if (uShw.pPD->a[iShw].n.u1Present) 381 { 382 LogFlow(("pgmPoolMonitorChainChanging: 32 bit pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u)); 383 pgmPoolFree(pVM, 384 uShw.pPD->a[iShw].u & X86_PDE_PAE_PG_MASK, 385 pPage->idx, 386 iShw); 387 ASMAtomicWriteU32(&uShw.pPD->a[iShw].u, 0); 388 } 
352 LogFlow(("pgmPoolMonitorChainChanging: 32 bit pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u)); 353 pgmPoolFree(pVM, 354 uShw.pPD->a[iShw].u & X86_PDE_PAE_PG_MASK, 355 pPage->idx, 356 iShw); 357 ASMAtomicWriteU32(&uShw.pPD->a[iShw].u, 0); 389 358 } 390 359 /* paranoia / a bit assumptive. */ … … 396 365 && iShw2 < RT_ELEMENTS(uShw.pPD->a)) 397 366 { 398 # ifdef VBOX_WITH_RAW_MODE_NOT_R0399 if (uShw.pPD->a[iShw2].u & PGM_PDFLAGS_MAPPING)400 {401 Assert(pgmMapAreMappingsEnabled(pVM));402 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));403 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);404 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));405 break;406 }407 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */408 367 if (uShw.pPD->a[iShw2].n.u1Present) 409 368 { … … 422 381 { 423 382 LogFlow(("pgmPoolMonitorChainChanging: iShw=%#x: %RX32 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u)); 424 # ifdef IN_RC /* TLB load - we're pushing things a bit... */425 ASMProbeReadByte(pvAddress);426 # endif427 383 pgmPoolFree(pVM, uShw.pPD->a[iShw].u & X86_PDE_PG_MASK, pPage->idx, iShw); 428 384 ASMAtomicWriteU32(&uShw.pPD->a[iShw].u, 0); … … 437 393 const unsigned iShw = off / sizeof(X86PDEPAE); 438 394 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD)); 439 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 440 if (uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING) 441 { 442 Assert(pgmMapAreMappingsEnabled(pVM)); 443 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 444 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict)); 445 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw)); 446 break; 447 } 448 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */ 395 449 396 /* 450 397 * Causes trouble when the guest uses a PDE to refer to the whole page table level … … 452 399 * table entries -> recheck; probably only applies to the RC case.) 
453 400 */ 454 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 455 else 456 #endif 401 if (uShw.pPDPae->a[iShw].n.u1Present) 457 402 { 458 if (uShw.pPDPae->a[iShw].n.u1Present) 459 { 460 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPae->a[iShw].u)); 461 pgmPoolFree(pVM, 462 uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK, 463 pPage->idx, 464 iShw); 465 ASMAtomicWriteU64(&uShw.pPDPae->a[iShw].u, 0); 466 } 403 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPae->a[iShw].u)); 404 pgmPoolFree(pVM, 405 uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK, 406 pPage->idx, 407 iShw); 408 ASMAtomicWriteU64(&uShw.pPDPae->a[iShw].u, 0); 467 409 } 410 468 411 /* paranoia / a bit assumptive. */ 469 412 if ( (off & 7) … … 473 416 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPDPae->a)); 474 417 475 #ifdef VBOX_WITH_RAW_MODE_NOT_R0476 if ( iShw2 != iShw477 && uShw.pPDPae->a[iShw2].u & PGM_PDFLAGS_MAPPING)478 {479 Assert(pgmMapAreMappingsEnabled(pVM));480 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);481 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));482 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));483 break;484 }485 else486 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */487 418 if (uShw.pPDPae->a[iShw2].n.u1Present) 488 419 { … … 513 444 if (iShw < X86_PG_PAE_PDPE_ENTRIES) /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */ 514 445 { 515 # ifdef VBOX_WITH_RAW_MODE_NOT_R0516 if (uShw.pPDPT->a[iShw].u & PGM_PLXFLAGS_MAPPING)517 {518 Assert(pgmMapAreMappingsEnabled(pVM));519 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));520 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);521 LogFlow(("pgmPoolMonitorChainChanging: Detected pdpt conflict at iShw=%#x!\n", iShw));522 break;523 }524 else525 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */526 446 if (uShw.pPDPT->a[iShw].n.u1Present) 527 447 { … … 542 462 && iShw2 < 
X86_PG_PAE_PDPE_ENTRIES) 543 463 { 544 # ifdef VBOX_WITH_RAW_MODE_NOT_R0545 if (uShw.pPDPT->a[iShw2].u & PGM_PLXFLAGS_MAPPING)546 {547 Assert(pgmMapAreMappingsEnabled(pVM));548 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));549 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);550 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));551 break;552 }553 else554 # endif /* VBOX_WITH_RAW_MODE_NOT_R0 */555 464 if (uShw.pPDPT->a[iShw2].n.u1Present) 556 465 { … … 568 477 } 569 478 570 #ifndef IN_RC571 479 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD: 572 480 { … … 664 572 break; 665 573 } 666 #endif /* IN_RING0 */667 574 668 575 default: … … 745 652 } 746 653 747 # ifndef IN_RC748 654 /** @todo could make this general, faulting close to rsp should be a safe reuse heuristic. */ 749 655 if ( HMHasPendingIrq(pVM) 750 && (pRegFrame->rsp - pvFault)< 32)656 && pRegFrame->rsp - pvFault < 32) 751 657 { 752 658 /* Fault caused by stack writes while trying to inject an interrupt event. 
*/ … … 754 660 return true; 755 661 } 756 # else757 NOREF(pVM); NOREF(pvFault);758 # endif759 662 760 663 LogFlow(("Reused instr %RGv %d at %RGv param1.fUse=%llx param1.reg=%d\n", pRegFrame->rip, pDis->pCurInstr->uOpcode, pvFault, pDis->Param1.fUse, pDis->Param1.Base.idxGenReg)); … … 880 783 else if (rc2 == VERR_EM_INTERPRETER) 881 784 { 882 # ifdef IN_RC 883 if (PATMIsPatchGCAddr(pVM, pRegFrame->eip)) 884 { 885 LogFlow(("pgmRZPoolAccessPfHandlerFlush: Interpretation failed for patch code %04x:%RGv, ignoring.\n", 886 pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->eip)); 887 rc = VINF_SUCCESS; 888 STAM_COUNTER_INC(&pPool->StatMonitorPfRZIntrFailPatch2); 889 } 890 else 891 # endif 892 { 893 rc = VINF_EM_RAW_EMULATE_INSTR; 894 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitorPf,EmulateInstr)); 895 } 785 rc = VINF_EM_RAW_EMULATE_INSTR; 786 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitorPf,EmulateInstr)); 896 787 } 897 788 else if (RT_FAILURE_NP(rc2)) … … 952 843 while (pRegFrame->rcx) 953 844 { 954 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)845 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 955 846 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 956 847 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement); … … 959 850 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement); 960 851 # endif 961 # ifdef IN_RC962 *(uint32_t *)(uintptr_t)pu32 = pRegFrame->eax;963 # else964 852 PGMPhysSimpleWriteGCPhys(pVM, GCPhysFault, &pRegFrame->rax, uIncrement); 965 # endif966 853 pu32 += uIncrement; 967 854 GCPhysFault += uIncrement; … … 1007 894 * Clear all the pages. ASSUMES that pvFault is readable. 
1008 895 */ 1009 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)896 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 1010 897 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); 1011 898 # endif … … 1026 913 } 1027 914 1028 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)915 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 1029 916 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 1030 917 # endif … … 1355 1242 } 1356 1243 } 1357 # endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */1244 # endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT && IN_RING0 */ 1358 1245 1359 1246 STAM_COUNTER_INC(&pPool->StatMonitorPfRZFlushModOverflow); … … 1452 1339 else 1453 1340 cMaxModifications = 24; 1454 # ifdef IN_RC1455 cMaxModifications *= 2; /* traps are cheaper than exists. */1456 # endif1457 1341 #endif 1458 1342 … … 1787 1671 Log(("Flush dirty page %RGp cMods=%d\n", pPage->GCPhys, pPage->cModifications)); 1788 1672 1789 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)1673 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 1790 1674 PVMCPU pVCpu = VMMGetCpu(pVM); 1791 1675 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu); … … 1853 1737 Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges)); 1854 1738 1855 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)1739 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 1856 1740 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 1857 1741 # endif … … 2145 2029 static int pgmPoolCacheFreeOne(PPGMPOOL pPool, uint16_t iUser) 2146 2030 { 2147 #ifndef IN_RC2148 2031 const PVM pVM = pPool->CTX_SUFF(pVM); 2149 #endif2150 2032 Assert(pPool->iAgeHead != pPool->iAgeTail); /* We shouldn't be here if there < 2 cached entries! */ 2151 2033 STAM_COUNTER_INC(&pPool->StatCacheFreeUpOne); … … 3666 3548 else 3667 3549 { 3668 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)/** @todo we can drop this now. 
*/3550 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 /** @todo we can drop this now. */ 3669 3551 /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and 3670 3552 pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */ … … 3686 3568 *pfFlushTLBs = true; 3687 3569 3688 # if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)3570 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 3689 3571 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); 3690 3572 # endif … … 3809 3691 break; 3810 3692 } 3811 #ifndef IN_RC 3693 3812 3694 case PGMPOOLKIND_EPT_PT_FOR_PHYS: 3813 3695 { … … 3834 3716 break; 3835 3717 } 3836 #endif 3837 } 3718 } 3719 3838 3720 if (!--cLeft) 3839 3721 break; … … 3962 3844 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD: 3963 3845 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD: 3964 #ifdef IN_RC3965 /*3966 * In 32 bits PAE mode we *must* invalidate the TLB when changing a3967 * PDPT entry; the CPU fetches them only during cr3 load, so any3968 * non-present PDPT will continue to cause page faults.3969 */3970 ASMReloadCR3();3971 #endif3972 RT_FALL_THRU();3973 3846 case PGMPOOLKIND_PAE_PD_PHYS: 3974 3847 case PGMPOOLKIND_PAE_PDPT_PHYS: … … 4976 4849 } 4977 4850 4978 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)4851 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 4979 4852 /* Start a subset so we won't run out of mapping space. */ 4980 4853 PVMCPU pVCpu = VMMGetCpu(pVM); … … 5009 4882 pgmPoolCacheFlushPage(pPool, pPage); 5010 4883 5011 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)4884 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 5012 4885 /* Heavy stuff done. */ 5013 4886 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset); … … 5097 4970 * If the pool isn't full grown yet, expand it. 5098 4971 */ 5099 if ( pPool->cCurPages < pPool->cMaxPages 5100 #if defined(IN_RC) 5101 /* Hack alert: we can't deal with jumps to ring 3 when called from MapCR3 and allocating pages for PAE PDs. 
*/ 5102 && enmKind != PGMPOOLKIND_PAE_PD_FOR_PAE_PD 5103 && (enmKind < PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD || enmKind > PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD) 5104 #endif 5105 ) 4972 if (pPool->cCurPages < pPool->cMaxPages) 5106 4973 { 5107 4974 STAM_PROFILE_ADV_SUSPEND(&pPool->StatAlloc, a); … … 5368 5235 { 5369 5236 Log(("PGMPoolFlushPage: found pgm pool pages for %RGp\n", GCPhys)); 5370 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT5237 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT 5371 5238 if (pPage->fDirty) 5372 5239 STAM_COUNTER_INC(&pPool->StatForceFlushDirtyPage); 5373 5240 else 5374 # endif5241 # endif 5375 5242 STAM_COUNTER_INC(&pPool->StatForceFlushPage); 5376 5243 Assert(!pgmPoolIsPageLocked(pPage)); … … 5408 5275 } 5409 5276 5410 #endif /* IN_RING3 */5411 #ifdef IN_RING35412 5277 5413 5278 /** … … 5496 5361 pPage->cLastAccessHandler = 0; 5497 5362 pPage->cLocked = 0; 5498 # ifdef VBOX_STRICT5363 # ifdef VBOX_STRICT 5499 5364 pPage->GCPtrDirtyFault = NIL_RTGCPTR; 5500 # endif5365 # endif 5501 5366 } 5502 5367 pPool->aPages[pPool->cCurPages - 1].iNext = NIL_PGMPOOL_IDX; … … 5560 5425 pPool->iAgeTail = NIL_PGMPOOL_IDX; 5561 5426 5562 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT5427 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT 5563 5428 /* Clear all dirty pages. */ 5564 5429 pPool->idxFreeDirtyPage = 0; … … 5566 5431 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aidxDirtyPages); i++) 5567 5432 pPool->aidxDirtyPages[i] = NIL_PGMPOOL_IDX; 5568 # endif5433 # endif 5569 5434 5570 5435 /*
Note: See TracChangeset for help on using the changeset viewer.