Changeset 99132 in vbox
- Timestamp: Mar 23, 2023 9:00:20 AM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 3 edited
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
(diff r98103 → r99132)

@@ -1185 +1185 @@
  # endif

+             Log7Func(("SLAT: GCPhysNestedFault=%RGp -> GCPhys=%#RGp\n", GCPhysNestedFault, pWalk->GCPhys));
+
  /*
   * Check page-access permissions.

@@ -1253 +1255 @@
      PPGMPAGE pPage;
      rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
-     AssertRCReturn(rc, rc);
+     if (RT_FAILURE(rc))
+     {
+         /*
+          * We failed to get the physical page which means it's a reserved/invalid
+          * page address (not MMIO even). This can typically be observed with
+          * Microsoft Hyper-V enabled Windows guests. We must fall back to emulating
+          * the instruction, see @bugref{10318#c7}.
+          */
+         return VINF_EM_RAW_EMULATE_INSTR;
+     }
+     /* Check if this is an MMIO page and NOT the VMX APIC-access page. */
      if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && !PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage))
      {

@@ -1296 +1308 @@
      {
          /* This is a read-only page. */
-         AssertMsgFailed(("Failed\n"));
-
-         Assert(!PGM_PAGE_IS_ZERO(pPage));
          AssertFatalMsg(!PGM_PAGE_IS_BALLOONED(pPage), ("Unexpected ballooned page at %RGp\n", GCPhysPage));
          STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2MakeWritable; });

@@ -2544 +2553 @@
      PPGMPAGE pPage;
      int rc = pgmPhysGetPageEx(pVCpu->CTX_SUFF(pVM), GCPhysPage, &pPage);
-     AssertRCReturnVoid(rc);
+     if (RT_SUCCESS(rc))
+     { /* likely */ }
+     else
+     {
+         /*
+          * This is a RAM hole/invalid/reserved address (not MMIO).
+          * Nested Microsoft Hyper-V maps addresses like 0xf0220000 as RW WB memory.
+          * Shadow a not-present page similar to MMIO, see @bugref{10318#c7}.
+          */
+         Assert(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
+         if (SHW_PTE_IS_P(*pPte))
+         {
+             Log2(("SyncPageWorker: deref! *pPte=%RX64\n", SHW_PTE_LOG64(*pPte)));
+             PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, SHW_PTE_GET_HCPHYS(*pPte), iPte, NIL_RTGCPHYS);
+         }
+         Log7Func(("RAM hole/reserved %RGp -> ShwPte=0\n", GCPhysPage));
+         SHW_PTE_ATOMIC_SET(*pPte, 0);
+         return;
+     }

      Assert(!PGM_PAGE_IS_BALLOONED(pPage));
-
- # ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
-     /* Make the page writable if necessary. */
-     /** @todo This needs to be applied to the regular case below, not here. And,
-      *        no we should *NOT* make the page writble, instead we need to write
-      *        protect them if necessary. */
-     if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
-         && (   PGM_PAGE_IS_ZERO(pPage)
-             || (   (pGstWalkAll->u.Ept.Pte.u & EPT_E_WRITE)
-                 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
- # ifdef VBOX_WITH_REAL_WRITE_MONITORED_PAGES
-                 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED
- # endif
- # ifdef VBOX_WITH_PAGE_SHARING
-                 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_SHARED
- # endif
-                 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_BALLOONED
-                )
-            )
-        )
-     {
-         AssertMsgFailed(("GCPhysPage=%RGp\n", GCPhysPage)); /** @todo Shouldn't happen but if it does deal with it later. */
-     }
- # endif

      /*

@@ -2579 +2582 @@
      if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) || PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage))
      {
-         /** @todo access bit. */
-         Pte.u = PGM_PAGE_GET_HCPHYS(pPage) | fGstShwPteFlags;
-         Log7Func(("regular page (%R[pgmpage]) at %RGp -> %RX64\n", pPage, GCPhysPage, Pte.u));
+ # ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
+         /* Page wasn't allocated, write protect it. */
+         if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
+             && (   PGM_PAGE_IS_ZERO(pPage)
+                 || (   (pGstWalkAll->u.Ept.Pte.u & EPT_E_WRITE)
+                     && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
+ # ifdef VBOX_WITH_REAL_WRITE_MONITORED_PAGES
+                     && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED
+ # endif
+ # ifdef VBOX_WITH_PAGE_SHARING
+                     && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_SHARED
+ # endif
+                     && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_BALLOONED
+                    )
+                )
+            )
+         {
+             Pte.u = PGM_PAGE_GET_HCPHYS(pPage) | (fGstShwPteFlags & ~EPT_E_WRITE);
+             Log7Func(("zero page (%R[pgmpage]) at %RGp -> %RX64\n", pPage, GCPhysPage, Pte.u));
+         }
+         else
+ # endif
+         {
+             /** @todo access bit. */
+             Pte.u = PGM_PAGE_GET_HCPHYS(pPage) | fGstShwPteFlags;
+             Log7Func(("regular page (%R[pgmpage]) at %RGp -> %RX64\n", pPage, GCPhysPage, Pte.u));
+         }
      }
      else if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
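Note: the PGMAllBth.h hunks above stop asserting when pgmPhysGetPageEx() cannot find a backing page for a nested-guest physical address (a reserved/invalid address or RAM hole, as seen with Hyper-V enabled Windows guests): the EPT-violation path now falls back to instruction emulation and the sync-page worker shadows a not-present PTE instead. Below is a minimal, self-contained sketch of that "fall back instead of asserting" pattern; the types and helpers (GUESTPAGE, samplePhysGetPage, SAMPLE_*) are hypothetical stand-ins, not the VirtualBox API.

    /* Hedged sketch: fall back gracefully instead of asserting when a
     * guest-physical address has no backing page.  All names hypothetical. */
    #include <stddef.h>
    #include <stdint.h>

    typedef struct GUESTPAGE { uint64_t uHostPhys; } GUESTPAGE;

    #define SAMPLE_SUCCESS            0   /* page found, continue normal handling   */
    #define SAMPLE_EMULATE_INSTR      1   /* hand the instruction to the emulator   */
    #define SAMPLE_ERR_INVALID_PHYS (-1)  /* reserved/unbacked address (RAM hole)   */

    /* Hypothetical lookup: returns SAMPLE_ERR_INVALID_PHYS for RAM holes. */
    static int samplePhysGetPage(uint64_t GCPhys, GUESTPAGE **ppPage)
    {
        (void)GCPhys;
        *ppPage = NULL;
        return SAMPLE_ERR_INVALID_PHYS;
    }

    static int sampleHandleNestedFault(uint64_t GCPhysPage)
    {
        GUESTPAGE *pPage = NULL;
        int rc = samplePhysGetPage(GCPhysPage, &pPage);
        if (rc != SAMPLE_SUCCESS)
        {
            /* Reserved/invalid address: do not assert, let the instruction be
             * emulated (mirrors returning VINF_EM_RAW_EMULATE_INSTR above). */
            return SAMPLE_EMULATE_INSTR;
        }
        /* ... normal shadow-PTE handling of pPage would go here ... */
        (void)pPage;
        return SAMPLE_SUCCESS;
    }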
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
(diff r99051 → r99132)

@@ -3441 +3441 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
         case PGMPOOLKIND_EPT_PT_FOR_EPT_PT:
+ # ifdef PGM_WITH_LARGE_PAGES
+        case PGMPOOLKIND_EPT_PT_FOR_EPT_2MB:
+ # endif
 #endif
         {

@@ -3513 +3516 @@
         /* Large page case only. */
         case PGMPOOLKIND_EPT_PD_FOR_PHYS:
- #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-        case PGMPOOLKIND_EPT_PT_FOR_EPT_2MB:   /* X86_PDE4M_PS is same as leaf bit in EPT; be careful! */
- #endif
         {
             Assert(pVM->pgm.s.fNestedPaging);
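Note: in the PGMAllPool.cpp hunks, the 2MB EPT page-table pool kind (PGMPOOLKIND_EPT_PT_FOR_EPT_2MB) is moved out of the page-directory "large page" bucket and grouped with the 4K EPT page-table kind, so both fall through to the same branch of the switch. A rough sketch of that case-grouping pattern follows; the enum and handler names (SAMPLEPOOLKIND, sampleScanPageTable) are hypothetical, not the real PGM pool code.

    /* Hedged sketch: group related case labels, optionally under a build-time
     * option, so they share one handler.  Names are hypothetical. */
    typedef enum SAMPLEPOOLKIND
    {
        SAMPLEPOOLKIND_EPT_PT_FOR_EPT_PT,
        SAMPLEPOOLKIND_EPT_PT_FOR_EPT_2MB,
        SAMPLEPOOLKIND_EPT_PD_FOR_PHYS
    } SAMPLEPOOLKIND;

    static void sampleScanPageTable(void)     { /* walk the 512 PTEs of a shadow PT */ }
    static void sampleScanPageDirectory(void) { /* walk PDEs / large-page mappings  */ }

    static void sampleTrackDeref(SAMPLEPOOLKIND enmKind)
    {
        switch (enmKind)
        {
            case SAMPLEPOOLKIND_EPT_PT_FOR_EPT_PT:
    #ifdef SAMPLE_WITH_LARGE_PAGES
            case SAMPLEPOOLKIND_EPT_PT_FOR_EPT_2MB: /* same entry layout, same scan */
    #endif
                sampleScanPageTable();
                break;

            case SAMPLEPOOLKIND_EPT_PD_FOR_PHYS:    /* directory-level handling */
                sampleScanPageDirectory();
                break;

            default:
                break;
        }
    }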
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
(diff r98103 → r99132)

@@ -319 +319 @@


+ #if 0
+ PGM_SHW_DECL(int, NestedGetPage)(PVMCPUCC pVCpu, PEPTPD pEptPd, PPGMPTWALK pWalk, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
+ {
+ #if PGM_SHW_TYPE == PGM_TYPE_EPT
+     RTGCPHYS const GCPhysNested = pWalk->GCPhysNested;
+     unsigned const iEptPd       = ((GCPhysNested >> SHW_PD_SHIFT) & SHW_PD_MASK);
+     Assert(iEptPd < EPT_PG_ENTRIES);
+     SHWPDE EptPde = pEptPd->a[iEptPd];
+     if (!SHW_PDE_IS_P(EptPde))
+     {
+         *pfFlags = 0;
+         *pHCPhys = NIL_RTHCPHYS;
+         return VERR_PAGE_TABLE_NOT_PRESENT;
+     }
+
+     if (SHW_PDE_IS_BIG(EptPde))
+     {
+         Assert(pWalk->fBigPage);
+         if (pfFlags)
+             *pfFlags = (EptPde.u & ~SHW_PDE_PG_MASK);
+         if (pHCPhys)
+             *pHCPhys = (EptPde.u & EPT_PDE2M_PG_MASK) + (pWalk->GCPhys & (RT_BIT(EPT_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);
+         return VINF_SUCCESS;
+     }
+
+     PSHWPT pEptPt;
+     int const rc = PGM_HCPHYS_2_PTR(pVCpu->CTX_SUFF(pVM), pVCpu, EptPde.u & EPT_PDE_PG_MASK, &pEptPt);
+     if (RT_FAILURE(rc))
+     {
+         *pfFlags = 0;
+         *pHCPhys = NIL_RTHCPHYS;
+         return rc;
+     }
+
+     unsigned const iEptPt = (GCPhysNested >> SHW_PT_SHIFT) & SHW_PT_MASK;
+     Assert(iEptPt < EPT_PG_ENTRIES);
+     SHWPTE EptPte = pEptPt->a[iEptPt];
+     if (!SHW_PTE_IS_P(EptPte))
+     {
+         *pfFlags = 0;
+         *pHCPhys = NIL_RTHCPHYS;
+         return VERR_PAGE_NOT_PRESENT;
+     }
+
+     if (pfFlags)
+     {
+         /* Read, Write and Execute bits (Present mask) are cumulative. */
+         *pfFlags = (SHW_PTE_GET_U(EptPte) & ~SHW_PTE_PG_MASK)
+                  & ((EptPde.u & EPT_PRESENT_MASK) | ~(uint64_t)EPT_PRESENT_MASK);
+     }
+     if (pHCPhys)
+         *pHCPhys = SHW_PTE_GET_HCPHYS(EptPte);
+     return VINF_SUCCESS;
+
+ #else  /* PGM_SHW_TYPE != PGM_TYPE_EPT */
+     RT_NOREF(pVCpu, pEptPd, pWalk, *pfFlags, pHCPhys);
+     AssertFailed();
+     return VERR_PGM_SHW_NONE_IPE;
+ #endif /* PGM_SHW_TYPE != PGM_TYPE_EPT */
+ }
+ #endif
+
+
 /**
  * Gets effective page information (from the VMM page directory).

@@ -385 +448 @@


+     /*
+      * We're currently ASSUMING that the SLAT mode here is always "direct".
+      * If a guest (e.g., nested Hyper-V) turns out to require this
+      * (probably while modifying shadow non-MMIO2 pages) then handle this
+      * by calling (NestedGetPage). Asserting for now.
+      */
      Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);
      PEPTPD pPDDst;

@@ -541 +610 @@

 # elif PGM_SHW_TYPE == PGM_TYPE_EPT
-     Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);
-     const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
-     PEPTPD pPDDst;
-     EPTPDE Pde;
-
-     rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
-     if (rc != VINF_SUCCESS)
+     EPTPDE Pde;
+     const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
+     if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT)
      {
-         AssertRC(rc);
-         return rc;
+         PEPTPD pPDDst;
+         rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
+         if (rc != VINF_SUCCESS)
+         {
+             AssertRC(rc);
+             return rc;
+         }
+         Assert(pPDDst);
+         Pde = pPDDst->a[iPd];
      }
-     Assert(pPDDst);
-     Pde = pPDDst->a[iPd];
+     else
+     {
+ # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+         Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
+         Assert(!(GCPtr & GUEST_PAGE_OFFSET_MASK));
+         PGMPTWALK    Walk;
+         PGMPTWALKGST GstWalkAll;
+         RTGCPHYS const GCPhysNestedPage = GCPtr;
+         rc = pgmGstSlatWalk(pVCpu, GCPhysNestedPage, false /*fIsLinearAddrValid*/, 0 /*GCPtrNestedFault*/, &Walk,
+                             &GstWalkAll);
+         if (RT_SUCCESS(rc))
+         {
+ # ifdef DEBUG_ramshankar
+             /* Paranoia. */
+             Assert(GstWalkAll.enmType == PGMPTWALKGSTTYPE_EPT);
+             Assert(Walk.fSucceeded);
+             Assert(Walk.fEffective & (PGM_PTATTRS_EPT_R_MASK | PGM_PTATTRS_EPT_W_MASK | PGM_PTATTRS_EPT_X_SUPER_MASK));
+             Assert(Walk.fIsSlat);
+             Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_R_MASK)  == RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_R_MASK));
+             Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_W_MASK)  == RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_W_MASK));
+             Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_NX_MASK) == !RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_X_SUPER_MASK));
+ # endif
+             PGM_A20_ASSERT_MASKED(pVCpu, Walk.GCPhys);
+             Assert(!(fFlags & X86_PTE_RW) || (Walk.fEffective & PGM_PTATTRS_W_MASK));
+
+             /* Update the nested-guest physical address with the translated guest-physical address. */
+             GCPtr = Walk.GCPhys;
+
+             /* Get the PD. */
+             PSHWPD pEptPd;
+             rc = pgmShwGetNestedEPTPDPtr(pVCpu, GCPhysNestedPage, NULL /*ppPdpt*/, &pEptPd, &GstWalkAll);
+             AssertRCReturn(rc, rc);
+             Assert(pEptPd);
+             Assert(iPd < EPT_PG_ENTRIES);
+             Pde = pEptPd->a[iPd];
+         }
+         else
+         {
+             Log(("Failed to translate nested-guest physical address %#RGp rc=%Rrc\n", GCPhysNestedPage, rc));
+             return rc;
+         }
+
+ # else  /* !VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+         AssertFailed();
+         return VERR_PGM_SHW_NONE_IPE;
+ # endif /* !VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+     }

 # else /* PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT */
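Note: the last PGMAllShw.h hunk adds a second lookup path for when the guest SLAT mode is nested EPT rather than direct: the nested-guest physical address is first translated with pgmGstSlatWalk() and only then is the shadow EPT page directory fetched and indexed. The following is a rough, self-contained sketch of that two-stage lookup; the structures and helpers (SLATWALK, sampleSlatTranslate, sampleGetShadowPde) are invented for illustration and do not match the real VirtualBox signatures.

    /* Hedged sketch of the two-stage lookup: nested GPA -> GPA via a SLAT
     * walk, then the shadow-PD lookup.  All names are hypothetical. */
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct SLATWALK { uint64_t GCPhys; bool fSucceeded; } SLATWALK;

    /* Hypothetical stage-1 translation of a nested-guest physical address. */
    static int sampleSlatTranslate(uint64_t GCPhysNested, SLATWALK *pWalk)
    {
        pWalk->GCPhys     = GCPhysNested;   /* identity mapping for the sketch */
        pWalk->fSucceeded = true;
        return 0;
    }

    /* Hypothetical shadow-PD entry fetch for an already-translated address. */
    static int sampleGetShadowPde(uint64_t GCPhys, unsigned iPd, uint64_t *pPde)
    {
        (void)GCPhys; (void)iPd;
        *pPde = 0;                          /* not present in this sketch */
        return 0;
    }

    static int sampleShwGetPde(bool fDirectSlat, uint64_t GCPtr, uint64_t *pPde)
    {
        unsigned const iPd = (unsigned)((GCPtr >> 21) & 0x1ff); /* PD index, 2 MB granularity */
        if (fDirectSlat)
            return sampleGetShadowPde(GCPtr, iPd, pPde);        /* direct SLAT mode */

        /* Nested EPT: translate the nested GPA first, then look up the PDE. */
        SLATWALK Walk;
        int rc = sampleSlatTranslate(GCPtr, &Walk);
        if (rc != 0 || !Walk.fSucceeded)
            return rc != 0 ? rc : -1;
        return sampleGetShadowPde(Walk.GCPhys, iPd, pPde);
    }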