Changeset 31092 in vbox

- Timestamp: Jul 26, 2010, 7:17:25 AM (15 years ago)
- svn:sync-xref-src-repo-rev: 64042
- Location: trunk/src/VBox/VMM
- Files: 2 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/PGMInternal.h
r31081 → r31092

Unmodified context:

    # define PGM_WITHOUT_MAPPINGS
    #endif

Removed:

    /**
     * Solve page is out of sync issues inside Guest Context (in PGMGC.cpp).
     * Comment it if it will break something.
     */
    #define PGM_OUT_OF_SYNC_IN_GC
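As an aside on what deleting the define does: once the switch and its #ifdef/#endif brackets are gone, the previously guarded path is always compiled in. A minimal, self-contained sketch of the pattern (hypothetical names, not VirtualBox code):

    #include <stdio.h>

    /* Stand-in for a PGM_OUT_OF_SYNC_IN_GC-style switch: while the #define
       exists the recovery path is conditional; deleting the define together
       with the #ifdef/#endif lines (as this changeset does in PGMAllBth.h
       below) makes the path unconditional. */
    #define FEATURE_OUT_OF_SYNC_RECOVERY

    static int handle_fault(int page_present)
    {
    #ifdef FEATURE_OUT_OF_SYNC_RECOVERY
        if (!page_present)
            return 1;   /* try to resync the page */
    #endif
        return 0;       /* fall through to other handling */
    }

    int main(void)
    {
        printf("resync=%d\n", handle_fault(0));
        return 0;
    }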
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r31090 → r31092

This hunk rewrites the access-handler and out-of-sync section of the bottom-half page-fault handler, PGM_BTH_NAME(Trap0eHandler). A pgmPhysGetPageEx() failure now returns early instead of nesting the whole section under if (RT_SUCCESS(rc)), and the out-of-sync code loses its PGM_OUT_OF_SYNC_IN_GC guard (the define is removed from PGMInternal.h above). The diff re-emits the entire region: the new version is added where shown below, and the superseded copy further down in the function is deleted (see the note at the end of this hunk).

Added, after the unmodified context "* in page tables which the guest believes to be present. */":

    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    Assert(GstWalk.Pde.n.u1Present);
    RTGCPHYS GCPhys = GstWalk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    # else
    RTGCPHYS GCPhys = (RTGCPHYS)pvFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
    PPGMPAGE pPage;
    rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
    if (RT_FAILURE(rc))
    {
        /*
         * When the guest accesses invalid physical memory (e.g. probing
         * of RAM or accessing a remapped MMIO range), then we'll fall
         * back to the recompiler to emulate the instruction.
         */
        LogFlow(("PGM #PF: pgmPhysGetPageEx(%RGp) failed with %Rrc\n", GCPhys, rc));
        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersInvalid);
        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /*
     * Any handlers?
     */
    if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
    {
        if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
        {
            /*
             * Physical page access handler.
             */
            const RTGCPHYS GCPhysFault = GCPhys | (pvFault & PAGE_OFFSET_MASK);
            PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
            if (pCur)
            {
    # ifdef PGM_SYNC_N_PAGES
                /*
                 * If the region is write protected and we got a page not present fault, then sync
                 * the pages. If the fault was caused by a read, then restart the instruction.
                 * In case of write access continue to the GC write handler.
                 *
                 * ASSUMES that there is only one handler per page or that they have similar write properties.
                 */
                if (    pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                    && !(uErr & X86_TRAP_PF_P))
                {
    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
    # else
                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
    # endif
                    if (    RT_FAILURE(rc)
                        || !(uErr & X86_TRAP_PF_RW)
                        ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
                    {
                        AssertRC(rc);
                        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
                        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
                        return rc;
                    }
                }
    # endif

                AssertMsg(   pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                          || (pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE && (uErr & X86_TRAP_PF_RW)),
                          ("Unexpected trap for physical handler: %08X (phys=%08x) pPage=%R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));

    # if defined(IN_RC) || defined(IN_RING0)
                if (pCur->CTX_SUFF(pfnHandler))
                {
                    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    # ifdef IN_RING0
                    PFNPGMR0PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
    # else
                    PFNPGMRCPHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
    # endif
                    bool  fLeaveLock = (pfnHandler != pPool->CTX_SUFF(pfnAccessHandler));
                    void *pvUser = pCur->CTX_SUFF(pvUser);

                    STAM_PROFILE_START(&pCur->Stat, h);
                    if (fLeaveLock)
                        pgmUnlock(pVM); /* @todo: Not entirely safe. */

                    rc = pfnHandler(pVM, uErr, pRegFrame, pvFault, GCPhysFault, pvUser);
                    if (fLeaveLock)
                        pgmLock(pVM);
    # ifdef VBOX_WITH_STATISTICS
                    pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
                    if (pCur)
                        STAM_PROFILE_STOP(&pCur->Stat, h);
    # else
                    pCur = NULL; /* might be invalid by now. */
    # endif
                }
                else
    # endif
                    rc = VINF_EM_RAW_EMULATE_INSTR;

                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersPhysical);
                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndPhys; });
                return rc;
            }
        }
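The dispatch pattern in the hunk above, which resolves a handler by the faulting physical address in a range structure, drops the lock around the callback, and then re-resolves because the handler may have been deregistered in the meantime, can be sketched independently of the PGM types. All names below are hypothetical stand-ins (the real code uses an AVL range tree via RTAvlroGCPhysRangeGet):

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t physaddr_t;

    typedef struct Handler {
        physaddr_t      first, last;                      /* covered range */
        int           (*pfnCallback)(physaddr_t, void *);
        void           *pvUser;
        struct Handler *pNext;  /* linear list here; a range tree in reality */
    } Handler;

    static Handler *g_pHandlers;

    /* Stand-in for the range lookup. */
    static Handler *handler_lookup(physaddr_t addr)
    {
        for (Handler *p = g_pHandlers; p; p = p->pNext)
            if (addr >= p->first && addr <= p->last)
                return p;
        return NULL;
    }

    int handler_dispatch(physaddr_t fault_addr)
    {
        Handler *p = handler_lookup(fault_addr);
        if (!p)
            return -1;                  /* no handler: caller syncs or emulates */

        /* The lock is dropped around the callback in the real code, so the
           pointer is stale afterwards; it has to be looked up again before
           being touched (the original does this for the statistics). */
        int rc = p->pfnCallback(fault_addr, p->pvUser);
        p = handler_lookup(fault_addr); /* may be NULL if deregistered */
        (void)p;
        return rc;
    }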
Added (continued):

    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
        else
        {
    # ifdef PGM_SYNC_N_PAGES
            /*
             * If the region is write protected and we got a page not present fault, then sync
             * the pages. If the fault was caused by a read, then restart the instruction.
             * In case of write access continue to the GC write handler.
             */
            if (    PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < PGM_PAGE_HNDL_PHYS_STATE_ALL
                && !(uErr & X86_TRAP_PF_P))
            {
                rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
                if (    RT_FAILURE(rc)
                    ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
                    || !(uErr & X86_TRAP_PF_RW))
                {
                    AssertRC(rc);
                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndVirt; });
                    return rc;
                }
            }
    # endif
            /*
             * Ok, it's a virtual page access handler.
             *
             * Since it's faster to search by address, we'll do that first
             * and then retry by GCPhys if that fails.
             */
            /** @todo r=bird: perhaps we should consider looking up by physical address directly now? */
            /** @note r=svl: true, but lookup on virtual address should remain as a fallback as phys & virt
             *        trees might be out of sync, because the page was changed without us noticing it
             *        (not-present -> present without invlpg or mov cr3, xxx). */
            PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
            if (pCur)
            {
                AssertMsg(   !(pvFault - pCur->Core.Key < pCur->cb)
                          || (    pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
                              || !(uErr & X86_TRAP_PF_P)
                              ||  (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
                          ("Unexpected trap for virtual handler: %RGv (phys=%RGp) pPage=%R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));

                if (    pvFault - pCur->Core.Key < pCur->cb
                    && (    uErr & X86_TRAP_PF_RW
                        ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
                {
    # ifdef IN_RC
                    STAM_PROFILE_START(&pCur->Stat, h);
                    pgmUnlock(pVM);
                    rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
                    pgmLock(pVM);
                    STAM_PROFILE_STOP(&pCur->Stat, h);
    # else
                    rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
    # endif
                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtual);
                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
                    return rc;
                }
                /* Unhandled part of a monitored page */
            }
            else
            {
                /* Check by physical address. */
                unsigned iPage;
                rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys + (pvFault & PAGE_OFFSET_MASK),
                                                     &pCur, &iPage);
                Assert(RT_SUCCESS(rc) || !pCur);
                if (    pCur
                    && (    uErr & X86_TRAP_PF_RW
                        ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
                {
                    Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == GCPhys);
    # ifdef IN_RC
                    RTGCPTR off = (iPage << PAGE_SHIFT) + (pvFault & PAGE_OFFSET_MASK) - (pCur->Core.Key & PAGE_OFFSET_MASK);
                    Assert(off < pCur->cb);
                    STAM_PROFILE_START(&pCur->Stat, h);
                    pgmUnlock(pVM);
                    rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, off);
                    pgmLock(pVM);
                    STAM_PROFILE_STOP(&pCur->Stat, h);
    # else
                    rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
    # endif
                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtualByPhys);
                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
                    return rc;
                }
            }
        }
    # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
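The two-step lookup the r=bird/r=svl exchange debates reduces to: search by virtual address first, and fall back to a physical-address search because the virtual tree can lag when a page goes not-present -> present without an invlpg or CR3 reload. A sketch with stub lookups (hypothetical names):

    #include <stdint.h>
    #include <stddef.h>

    typedef uintptr_t vaddr_t;
    typedef uint64_t  paddr_t;

    typedef struct VirtHandler { vaddr_t key; size_t cb; } VirtHandler;

    static VirtHandler g_one = { 0x1000, 0x2000 };   /* toy registration */

    static VirtHandler *find_by_virt(vaddr_t va)     /* fast path */
    {
        return (va - g_one.key < g_one.cb) ? &g_one : NULL;
    }

    static VirtHandler *find_by_phys(paddr_t pa)     /* fallback */
    {
        (void)pa;   /* a real version walks the phys-to-virt mapping */
        return &g_one;
    }

    VirtHandler *resolve_virt_handler(vaddr_t fault_va, paddr_t fault_pa)
    {
        VirtHandler *h = find_by_virt(fault_va);
        if (h)
            return h;   /* fault inside a registered virtual range */
        /* Trees may be out of sync: retry by physical address. */
        return find_by_phys(fault_pa);
    }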
Added (continued):

        /*
         * There is a handled area of the page, but this fault doesn't belong to it.
         * We must emulate the instruction.
         *
         * To avoid crashing (non-fatal) in the interpreter and going back to the recompiler,
         * we first check if this was a page-not-present fault for a page with only
         * write access handlers. Restart the instruction if it wasn't a write access.
         */
        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersUnhandled);

        if (    !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
            &&  !(uErr & X86_TRAP_PF_P))
        {
    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
            rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
    # else
            rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
    # endif
            if (    RT_FAILURE(rc)
                ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
                ||  !(uErr & X86_TRAP_PF_RW))
            {
                AssertRC(rc);
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
                return rc;
            }
        }

        /** @todo This particular case can cause quite a lot of overhead. E.g. the early stage of
         *        kernel booting in Ubuntu 6.06 writes to an unhandled part of the LDT page several
         *        million times. */
        rc = PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault);
        LogFlow(("PGM: PGMInterpretInstruction -> rc=%d pPage=%R[pgmpage]\n", rc, pPage));
        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndUnhandled; });
        return rc;
    } /* if any kind of handler */

    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    if (uErr & X86_TRAP_PF_P)
    {
        /*
         * The page isn't marked, but it might still be monitored by a virtual page access handler.
         * (ASSUMES no temporary disabling of virtual handlers.)
         */
        /** @todo r=bird: Since the purpose is to catch out of sync pages with virtual handler(s) here,
         *        we should correct both the shadow page table and physical memory flags, and not only check for
         *        accesses within the handler region but for access to pages with virtual handlers. */
        PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
        if (pCur)
        {
            AssertMsg(   !(pvFault - pCur->Core.Key < pCur->cb)
                      || (    pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
                          || !(uErr & X86_TRAP_PF_P)
                          ||  (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
                      ("Unexpected trap for virtual handler: %08X (phys=%08x) %R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));

            if (    pvFault - pCur->Core.Key < pCur->cb
                && (    uErr & X86_TRAP_PF_RW
                    ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
            {
    # ifdef IN_RC
                STAM_PROFILE_START(&pCur->Stat, h);
                pgmUnlock(pVM);
                rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
                pgmLock(pVM);
                STAM_PROFILE_STOP(&pCur->Stat, h);
    # else
                rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
    # endif
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtualUnmarked);
                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
                return rc;
            }
        }
    }
    # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
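The restart-or-emulate decision that the SyncPage call sites above implement can be stated as one predicate. A sketch under the same error-code conventions (PF_P/PF_RW mirror X86_TRAP_PF_P/X86_TRAP_PF_RW; this is not the PGM API):

    #include <stdbool.h>

    #define PF_P  0x1u   /* fault on a present page (protection violation) */
    #define PF_RW 0x2u   /* fault caused by a write */

    /* A not-present fault on a page whose handlers only intercept writes can
       be satisfied by syncing the page and restarting the instruction,
       unless it actually was a write, which must still reach the handler. */
    static bool restart_after_sync(unsigned uErr, bool fAllAccessesHandled)
    {
        if (fAllAccessesHandled)
            return false;          /* every access must be intercepted */
        if (uErr & PF_P)
            return false;          /* page was present: genuine protection fault */
        return !(uErr & PF_RW);    /* read of a write-monitored page */
    }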
728 * 729 * Check it for page out-of-sync situation. 730 */ 731 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c); 732 if (!(uErr & X86_TRAP_PF_P)) 733 { 449 734 /* 450 * If we have a GC address we'll check if it has any flags set. 735 * Page is not present in our page tables. Try to sync it! 736 * BTW, fPageShw is invalid in this branch! 451 737 */ 452 { 453 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b); 454 455 PPGMPAGE pPage; 456 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage); 457 if (RT_SUCCESS(rc)) /** just handle the failure immediate (it returns) and make things easier to read. */ 738 if (uErr & X86_TRAP_PF_US) 739 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUser)); 740 else /* supervisor */ 741 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor)); 742 743 if (PGM_PAGE_IS_BALLOONED(pPage)) 744 { 745 /* Emulate reads from ballooned pages as they are not present in 746 our shadow page tables. (Required for e.g. Solaris guests; soft 747 ecc, random nr generator.) */ 748 rc = PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault); 749 LogFlow(("PGM: PGMInterpretInstruction balloon -> rc=%d pPage=%R[pgmpage]\n", rc, pPage)); 750 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncBallloon)); 751 STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b); 752 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndUnhandled; }); 753 return rc; 754 } 755 756 # if defined(LOG_ENABLED) && !defined(IN_RING0) 757 RTGCPHYS GCPhys2; 758 uint64_t fPageGst2; 759 PGMGstGetPage(pVCpu, pvFault, &fPageGst2, &GCPhys2); 760 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 761 Log(("Page out of sync: %RGv eip=%08x PdeSrc.US=%d fPageGst2=%08llx GCPhys2=%RGp scan=%d\n", 762 pvFault, pRegFrame->eip, GstWalk.Pde.n.u1User, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip))); 763 # else 764 Log(("Page out of sync: %RGv eip=%08x fPageGst2=%08llx GCPhys2=%RGp scan=%d\n", 765 pvFault, pRegFrame->eip, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip))); 766 # endif 767 # endif /* LOG_ENABLED */ 768 769 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) 770 if (CPUMGetGuestCPL(pVCpu, pRegFrame) == 0) 771 { 772 /** @todo It's not necessary to repeat this here, GstWalk has 773 * all the information. */ 774 uint64_t fPageGst; 775 rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, NULL); 776 if ( RT_SUCCESS(rc) 777 && !(fPageGst & X86_PTE_US)) 458 778 { 459 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 779 /* Note: Can't check for X86_TRAP_ID bit, because that requires execute disable support on the CPU. */ 780 if ( pvFault == (RTGCPTR)pRegFrame->eip 781 || pvFault - pRegFrame->eip < 8 /* instruction crossing a page boundary */ 782 # ifdef CSAM_DETECT_NEW_CODE_PAGES 783 || ( !PATMIsPatchGCAddr(pVM, pRegFrame->eip) 784 && CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)) /* any new code we encounter here */ 785 # endif /* CSAM_DETECT_NEW_CODE_PAGES */ 786 ) 460 787 { 461 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage)) 788 LogFlow(("CSAMExecFault %RX32\n", pRegFrame->eip)); 789 rc = CSAMExecFault(pVM, (RTRCPTR)pRegFrame->eip); 790 if (rc != VINF_SUCCESS) 462 791 { 463 792 /* 464 * Physical page access handler. 793 * CSAM needs to perform a job in ring 3. 794 * 795 * Sync the page before going to the host context; otherwise we'll end up in a loop if 796 * CSAM fails (e.g. 
    # ifdef CSAM_DETECT_NEW_CODE_PAGES
                else if (    uErr == X86_TRAP_PF_RW
                         &&  pRegFrame->ecx >= 0x100    /* early check for movswd count */
                         &&  pRegFrame->ecx < 0x10000)
                {
                    /* In case of a write to a non-present supervisor shadow page, we'll take special precautions
                     * to detect loading of new code pages.
                     */

                    /*
                     * Decode the instruction.
                     */
                    RTGCPTR PC;
                    rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs,
                                                      &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
                    if (rc == VINF_SUCCESS)
                    {
                        PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
                        uint32_t     cbOp;
                        rc = EMInterpretDisasOneEx(pVM, pVCpu, PC, pRegFrame, pDis, &cbOp);

                        /* For now we'll restrict this to rep movsw/d instructions */
                        if (    rc == VINF_SUCCESS
                            &&  pDis->pCurInstr->opcode == OP_MOVSWD
                            &&  (pDis->prefix & PREFIX_REP))
                        {
                            CSAMMarkPossibleCodePage(pVM, pvFault);
                        }
                    }
                }
    # endif /* CSAM_DETECT_NEW_CODE_PAGES */

                /*
                 * Mark this page as safe.
                 */
                /** @todo not correct for pages that contain both code and data!! */
                Log2(("CSAMMarkPage %RGv; scanned=%d\n", pvFault, true));
                CSAMMarkPage(pVM, pvFault, true);
            }
        }
    # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
        rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
    # else
        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
    # endif
        if (RT_SUCCESS(rc))
        {
            /* The page was successfully synced, return to the guest. */
            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSync; });
            return VINF_SUCCESS;
        }
    }
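For the not-present branch just shown, the three possible outcomes fit in a few lines; a compressed sketch with hypothetical helpers, not the real control flow:

    typedef enum { FAULT_EMULATE, FAULT_CSAM_RING3, FAULT_SYNC } fault_action_t;

    /* Ballooned pages have no backing in the shadow tables, so reads are
       emulated; otherwise the page may first be handed to CSAM for code
       scanning, and is then synced into the shadow tables. */
    static fault_action_t not_present_action(int fBallooned, int fCsamNeedsRing3)
    {
        if (fBallooned)
            return FAULT_EMULATE;
        if (fCsamNeedsRing3)
            return FAULT_CSAM_RING3;
        return FAULT_SYNC;
    }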
Added (continued):

    else /* uErr & X86_TRAP_PF_P: */
    {
        /*
         * Write protected pages are made writable when the guest makes the first
         * write to them. This happens for pages that are shared, write monitored
         * and not yet allocated.
         *
         * Also, a side effect of not flushing global PDEs are out of sync pages due
         * to physical monitored regions that are no longer valid.
         * Assume for now it only applies to the read/write flag.
         */
        if (    RT_SUCCESS(rc)
            &&  (uErr & X86_TRAP_PF_RW))
        {
            if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
            {
                Log(("PGM #PF: Make writable: %RGp %R[pgmpage] pvFault=%RGp uErr=%#x\n", GCPhys, pPage, pvFault, uErr));
                Assert(!PGM_PAGE_IS_ZERO(pPage));
                AssertFatalMsg(!PGM_PAGE_IS_BALLOONED(pPage), ("Unexpected ballooned page at %RGp\n", GCPhys));

                rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
                if (rc != VINF_SUCCESS)
                {
                    AssertMsg(rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("%Rrc\n", rc));
                    return rc;
                }
                if (RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)))
                    return VINF_EM_NO_MEMORY;
            }

    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
            /* Check to see if we need to emulate the instruction as X86_CR0_WP has been cleared. */
            if (    CPUMGetGuestCPL(pVCpu, pRegFrame) == 0
                &&  (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG)
            {
                Assert((uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_P)) == (X86_TRAP_PF_RW | X86_TRAP_PF_P));
                /** @todo It's not necessary to repeat this here, GstWalk has
                 *        all the information. */
                uint64_t fPageGst;
                rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, NULL);
                if (    RT_SUCCESS(rc)
                    && !(fPageGst & X86_PTE_RW))
                {
                    rc = PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault);
                    if (RT_SUCCESS(rc))
                        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eWPEmulInRZ);
                    else
                        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eWPEmulToR3);
                    return rc;
                }
                AssertMsg(RT_SUCCESS(rc), ("Unexpected r/w page %RGv flag=%x rc=%Rrc\n", pvFault, (uint32_t)fPageGst, rc));
            }
    # endif
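The CR0.WP test above is the subtle case: a ring-0 guest with WP clear may legitimately write to pages its own PTEs mark read-only, while the shadow tables still fault the write, so it has to be interpreted. The predicate, with the architectural constants spelled out (a sketch, not the CPUM API):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_CR0_PG  UINT32_C(0x80000000)   /* paging enabled */
    #define X86_CR0_WP  UINT32_C(0x00010000)   /* write protect  */
    #define X86_PTE_RW  UINT64_C(0x00000002)   /* PTE read/write */

    static bool must_emulate_wp_clear_write(unsigned cpl, uint32_t cr0,
                                            uint64_t fGuestPte)
    {
        return cpl == 0                                        /* supervisor    */
            && (cr0 & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG /* PG=1, WP=0    */
            && !(fGuestPte & X86_PTE_RW);                      /* guest PTE r/o */
    }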
Added (continued):

            /// @todo count the above case; else
            if (uErr & X86_TRAP_PF_US)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncUserWrite));
            else /* supervisor */
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisorWrite));

            /*
             * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the
             * page is not present, which is not true in this case.
             */
    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
            rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
    # else
            rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, 1, uErr);
    # endif
            if (RT_SUCCESS(rc))
            {
                /*
                 * Page was successfully synced, return to guest.
                 * First invalidate the page as it might be in the TLB.
                 */
    # if PGM_SHW_TYPE == PGM_TYPE_EPT
                HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)pvFault);
    # else
                PGM_INVL_PG(pVCpu, pvFault);
    # endif
    # ifdef VBOX_STRICT
                RTGCPHYS GCPhys2;
                uint64_t fPageGst;
                if (!pVM->pgm.s.fNestedPaging)
                {
                    rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, &GCPhys2);
                    AssertMsg(RT_SUCCESS(rc) && (fPageGst & X86_PTE_RW), ("rc=%Rrc fPageGst=%RX64\n", rc, fPageGst));
                    LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys2, (uint64_t)fPageGst));
                }
                uint64_t fPageShw;
                rc = PGMShwGetPage(pVCpu, pvFault, &fPageShw, NULL);
                AssertMsg(   (RT_SUCCESS(rc) && (fPageShw & X86_PTE_RW))
                          || pVM->cCpus > 1 /* new monitor can be installed/page table flushed between the trap exit and PGMTrap0eHandler */,
                          ("rc=%Rrc fPageShw=%RX64\n", rc, fPageShw));
    # endif /* VBOX_STRICT */
                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndObs; });
                return VINF_SUCCESS;
            }
        }

    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    # ifdef VBOX_STRICT
        /*
         * Check for VMM page flags vs. guest page flags consistency.
         * Currently only for debug purposes.
         */
        if (RT_SUCCESS(rc))
        {
            /* Get guest page flags. */
            uint64_t fPageGst;
            rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, NULL);
            if (RT_SUCCESS(rc))
            {
                uint64_t fPageShw;
                rc = PGMShwGetPage(pVCpu, pvFault, &fPageShw, NULL);

                /*
                 * Compare page flags.
                 * Note: we have AVL, A, D bits desynched.
                 */
                AssertMsg((fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)),
                          ("Page flags mismatch! pvFault=%RGv uErr=%x GCPhys=%RGp fPageShw=%RX64 fPageGst=%RX64\n", pvFault, (uint32_t)uErr, GCPhys, fPageShw, fPageGst));
            }
            else
                AssertMsgFailed(("PGMGstGetPage rc=%Rrc\n", rc));
        }
        else
            AssertMsgFailed(("PGMGCGetPage rc=%Rrc\n", rc));
    # endif /* VBOX_STRICT */
    # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
    }
    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);

Removed:

    /** @todo This point is never really reached. Clean up later! */

Added:

    /** @todo This point is never really reached, is it? */

Unmodified context:

    # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)

Removed: the superseded copy of the handler-dispatch and out-of-sync code further down in the function, namely the old "If we have a GC address we'll check if it has any flags set" block, which nested everything under if (RT_SUCCESS(rc)) after the pgmPhysGetPageEx() call and wrapped the out-of-sync part in "# ifdef PGM_OUT_OF_SYNC_IN_GC /** @todo remove this bugger. */" ... "# endif /* PGM_OUT_OF_SYNC_IN_GC */". Apart from the restructured error handling, the two new "It's not necessary to repeat this here, GstWalk has all the information" remarks, and minor comment reflows, the deleted block is line-for-line identical to the code added above and is not repeated here.