Timestamp: Nov 11, 2021 9:53:42 AM (3 years ago)
svn:sync-xref-src-repo-rev: 148201
Files: 1 edited
Legend: Unmodified / Added / Removed
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r92326 r92350 3598 3598 3599 3599 /** 3600 * Performs a read of guest virtual memory for instruction emulation.3601 *3602 * This will check permissions, raise exceptions and update the access bits.3603 *3604 * The current implementation will bypass all access handlers. It may later be3605 * changed to at least respect MMIO.3606 *3607 *3608 * @returns VBox status code suitable to scheduling.3609 * @retval VINF_SUCCESS if the read was performed successfully.3610 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.3611 *3612 * @param pVCpu The cross context virtual CPU structure of the calling EMT.3613 * @param pCtxCore The context core.3614 * @param pvDst Where to put the bytes we've read.3615 * @param GCPtrSrc The source address.3616 * @param cb The number of bytes to read. Not more than a page.3617 *3618 * @remark This function will dynamically map physical pages in GC. This may unmap3619 * mappings done by the caller. Be careful!3620 */3621 VMMDECL(int) PGMPhysInterpretedRead(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)3622 {3623 NOREF(pCtxCore);3624 PVMCC pVM = pVCpu->CTX_SUFF(pVM);3625 Assert(cb <= PAGE_SIZE);3626 VMCPU_ASSERT_EMT(pVCpu);3627 3628 /** @todo r=bird: This isn't perfect!3629 * -# It's not checking for reserved bits being 1.3630 * -# It's not correctly dealing with the access bit.3631 * -# It's not respecting MMIO memory or any other access handlers.3632 */3633 /*3634 * 1. Translate virtual to physical. This may fault.3635 * 2. Map the physical address.3636 * 3. Do the read operation.3637 * 4. Set access bits if required.3638 */3639 int rc;3640 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);3641 if (cb <= cb1)3642 {3643 /*3644 * Not crossing pages.3645 */3646 RTGCPHYS GCPhys;3647 uint64_t fFlags;3648 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags, &GCPhys);3649 if (RT_SUCCESS(rc))3650 {3651 /** @todo we should check reserved bits ... 
*/3652 PGMPAGEMAPLOCK PgMpLck;3653 void const *pvSrc;3654 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);3655 switch (rc)3656 {3657 case VINF_SUCCESS:3658 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));3659 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);3660 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);3661 break;3662 case VERR_PGM_PHYS_PAGE_RESERVED:3663 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:3664 memset(pvDst, 0xff, cb);3665 break;3666 default:3667 Assert(RT_FAILURE_NP(rc));3668 return rc;3669 }3670 3671 /** @todo access bit emulation isn't 100% correct. */3672 if (!(fFlags & X86_PTE_A))3673 {3674 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);3675 AssertRC(rc);3676 }3677 return VINF_SUCCESS;3678 }3679 }3680 else3681 {3682 /*3683 * Crosses pages.3684 */3685 size_t cb2 = cb - cb1;3686 uint64_t fFlags1;3687 RTGCPHYS GCPhys1;3688 uint64_t fFlags2;3689 RTGCPHYS GCPhys2;3690 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);3691 if (RT_SUCCESS(rc))3692 {3693 rc = PGMGstGetPage(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);3694 if (RT_SUCCESS(rc))3695 {3696 /** @todo we should check reserved bits ... 
*/3697 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));3698 PGMPAGEMAPLOCK PgMpLck;3699 void const *pvSrc1;3700 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);3701 switch (rc)3702 {3703 case VINF_SUCCESS:3704 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);3705 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);3706 break;3707 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:3708 memset(pvDst, 0xff, cb1);3709 break;3710 default:3711 Assert(RT_FAILURE_NP(rc));3712 return rc;3713 }3714 3715 void const *pvSrc2;3716 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);3717 switch (rc)3718 {3719 case VINF_SUCCESS:3720 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);3721 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);3722 break;3723 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:3724 memset((uint8_t *)pvDst + cb1, 0xff, cb2);3725 break;3726 default:3727 Assert(RT_FAILURE_NP(rc));3728 return rc;3729 }3730 3731 if (!(fFlags1 & X86_PTE_A))3732 {3733 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);3734 AssertRC(rc);3735 }3736 if (!(fFlags2 & X86_PTE_A))3737 {3738 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);3739 AssertRC(rc);3740 }3741 return VINF_SUCCESS;3742 }3743 }3744 }3745 3746 /*3747 * Raise a #PF.3748 */3749 uint32_t uErr;3750 3751 /* Get the current privilege level. */3752 uint32_t cpl = CPUMGetGuestCPL(pVCpu);3753 switch (rc)3754 {3755 case VINF_SUCCESS:3756 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;3757 break;3758 3759 case VERR_PAGE_NOT_PRESENT:3760 case VERR_PAGE_TABLE_NOT_PRESENT:3761 uErr = (cpl >= 2) ? 
X86_TRAP_PF_US : 0;3762 break;3763 3764 default:3765 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));3766 return rc;3767 }3768 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));3769 rc = TRPMAssertXcptPF(pVCpu, GCPtrSrc, uErr);3770 if (RT_SUCCESS(rc))3771 return VINF_EM_RAW_GUEST_TRAP;3772 return rc;3773 }3774 3775 3776 /**3777 * Performs a read of guest virtual memory for instruction emulation.3778 *3779 * This will check permissions, raise exceptions and update the access bits.3780 *3781 * The current implementation will bypass all access handlers. It may later be3782 * changed to at least respect MMIO.3783 *3784 *3785 * @returns VBox status code suitable to scheduling.3786 * @retval VINF_SUCCESS if the read was performed successfully.3787 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.3788 *3789 * @param pVCpu The cross context virtual CPU structure of the calling EMT.3790 * @param pCtxCore The context core.3791 * @param pvDst Where to put the bytes we've read.3792 * @param GCPtrSrc The source address.3793 * @param cb The number of bytes to read. Not more than a page.3794 * @param fRaiseTrap If set the trap will be raised on as per spec, if clear3795 * an appropriate error status will be returned (no3796 * informational at all).3797 *3798 *3799 * @remarks Takes the PGM lock.3800 * @remarks A page fault on the 2nd page of the access will be raised without3801 * writing the bits on the first page since we're ASSUMING that the3802 * caller is emulating an instruction access.3803 * @remarks This function will dynamically map physical pages in GC. This may3804 * unmap mappings done by the caller. 
Be careful!3805 */3806 VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,3807 bool fRaiseTrap)3808 {3809 NOREF(pCtxCore);3810 PVMCC pVM = pVCpu->CTX_SUFF(pVM);3811 Assert(cb <= PAGE_SIZE);3812 VMCPU_ASSERT_EMT(pVCpu);3813 3814 /*3815 * 1. Translate virtual to physical. This may fault.3816 * 2. Map the physical address.3817 * 3. Do the read operation.3818 * 4. Set access bits if required.3819 */3820 int rc;3821 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);3822 if (cb <= cb1)3823 {3824 /*3825 * Not crossing pages.3826 */3827 RTGCPHYS GCPhys;3828 uint64_t fFlags;3829 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags, &GCPhys);3830 if (RT_SUCCESS(rc))3831 {3832 if (1) /** @todo we should check reserved bits ... */3833 {3834 const void *pvSrc;3835 PGMPAGEMAPLOCK Lock;3836 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);3837 switch (rc)3838 {3839 case VINF_SUCCESS:3840 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",3841 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));3842 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);3843 PGMPhysReleasePageMappingLock(pVM, &Lock);3844 break;3845 case VERR_PGM_PHYS_PAGE_RESERVED:3846 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:3847 memset(pvDst, 0xff, cb);3848 break;3849 default:3850 AssertMsgFailed(("%Rrc\n", rc));3851 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);3852 return rc;3853 }3854 3855 if (!(fFlags & X86_PTE_A))3856 {3857 /** @todo access bit emulation isn't 100% correct. 
*/3858 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);3859 AssertRC(rc);3860 }3861 return VINF_SUCCESS;3862 }3863 }3864 }3865 else3866 {3867 /*3868 * Crosses pages.3869 */3870 size_t cb2 = cb - cb1;3871 uint64_t fFlags1;3872 RTGCPHYS GCPhys1;3873 uint64_t fFlags2;3874 RTGCPHYS GCPhys2;3875 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);3876 if (RT_SUCCESS(rc))3877 {3878 rc = PGMGstGetPage(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);3879 if (RT_SUCCESS(rc))3880 {3881 if (1) /** @todo we should check reserved bits ... */3882 {3883 const void *pvSrc;3884 PGMPAGEMAPLOCK Lock;3885 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);3886 switch (rc)3887 {3888 case VINF_SUCCESS:3889 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",3890 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));3891 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);3892 PGMPhysReleasePageMappingLock(pVM, &Lock);3893 break;3894 case VERR_PGM_PHYS_PAGE_RESERVED:3895 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:3896 memset(pvDst, 0xff, cb1);3897 break;3898 default:3899 AssertMsgFailed(("%Rrc\n", rc));3900 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);3901 return rc;3902 }3903 3904 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);3905 switch (rc)3906 {3907 case VINF_SUCCESS:3908 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);3909 PGMPhysReleasePageMappingLock(pVM, &Lock);3910 break;3911 case VERR_PGM_PHYS_PAGE_RESERVED:3912 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:3913 memset((uint8_t *)pvDst + cb1, 0xff, cb2);3914 break;3915 default:3916 AssertMsgFailed(("%Rrc\n", rc));3917 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);3918 return rc;3919 }3920 3921 if (!(fFlags1 & X86_PTE_A))3922 {3923 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);3924 AssertRC(rc);3925 }3926 if (!(fFlags2 & X86_PTE_A))3927 {3928 rc = 
PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);3929 AssertRC(rc);3930 }3931 return VINF_SUCCESS;3932 }3933 /* sort out which page */3934 }3935 else3936 GCPtrSrc += cb1; /* fault on 2nd page */3937 }3938 }3939 3940 /*3941 * Raise a #PF if we're allowed to do that.3942 */3943 /* Calc the error bits. */3944 uint32_t cpl = CPUMGetGuestCPL(pVCpu);3945 uint32_t uErr;3946 switch (rc)3947 {3948 case VINF_SUCCESS:3949 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;3950 rc = VERR_ACCESS_DENIED;3951 break;3952 3953 case VERR_PAGE_NOT_PRESENT:3954 case VERR_PAGE_TABLE_NOT_PRESENT:3955 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;3956 break;3957 3958 default:3959 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));3960 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);3961 return rc;3962 }3963 if (fRaiseTrap)3964 {3965 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));3966 rc = TRPMAssertXcptPF(pVCpu, GCPtrSrc, uErr);3967 if (RT_SUCCESS(rc))3968 return VINF_EM_RAW_GUEST_TRAP;3969 return rc;3970 }3971 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));3972 return rc;3973 }3974 3975 3976 /**3977 * Performs a write to guest virtual memory for instruction emulation.3978 *3979 * This will check permissions, raise exceptions and update the dirty and access3980 * bits.3981 *3982 * @returns VBox status code suitable to scheduling.3983 * @retval VINF_SUCCESS if the read was performed successfully.3984 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.3985 *3986 * @param pVCpu The cross context virtual CPU structure of the calling EMT.3987 * @param pCtxCore The context core.3988 * @param GCPtrDst The destination address.3989 * @param pvSrc What to write.3990 * @param cb The number of bytes to write. 
Not more than a page.3991 * @param fRaiseTrap If set the trap will be raised on as per spec, if clear3992 * an appropriate error status will be returned (no3993 * informational at all).3994 *3995 * @remarks Takes the PGM lock.3996 * @remarks A page fault on the 2nd page of the access will be raised without3997 * writing the bits on the first page since we're ASSUMING that the3998 * caller is emulating an instruction access.3999 * @remarks This function will dynamically map physical pages in GC. This may4000 * unmap mappings done by the caller. Be careful!4001 */4002 VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,4003 size_t cb, bool fRaiseTrap)4004 {4005 NOREF(pCtxCore);4006 Assert(cb <= PAGE_SIZE);4007 PVMCC pVM = pVCpu->CTX_SUFF(pVM);4008 VMCPU_ASSERT_EMT(pVCpu);4009 4010 /*4011 * 1. Translate virtual to physical. This may fault.4012 * 2. Map the physical address.4013 * 3. Do the write operation.4014 * 4. Set access bits if required.4015 */4016 /** @todo Since this method is frequently used by EMInterpret or IOM4017 * upon a write fault to an write access monitored page, we can4018 * reuse the guest page table walking from the \#PF code. */4019 int rc;4020 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);4021 if (cb <= cb1)4022 {4023 /*4024 * Not crossing pages.4025 */4026 RTGCPHYS GCPhys;4027 uint64_t fFlags;4028 rc = PGMGstGetPage(pVCpu, GCPtrDst, &fFlags, &GCPhys);4029 if (RT_SUCCESS(rc))4030 {4031 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */4032 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)4033 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. 
*/4034 {4035 void *pvDst;4036 PGMPAGEMAPLOCK Lock;4037 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);4038 switch (rc)4039 {4040 case VINF_SUCCESS:4041 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",4042 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));4043 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);4044 PGMPhysReleasePageMappingLock(pVM, &Lock);4045 break;4046 case VERR_PGM_PHYS_PAGE_RESERVED:4047 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:4048 /* bit bucket */4049 break;4050 default:4051 AssertMsgFailed(("%Rrc\n", rc));4052 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);4053 return rc;4054 }4055 4056 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))4057 {4058 /** @todo dirty & access bit emulation isn't 100% correct. */4059 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));4060 AssertRC(rc);4061 }4062 return VINF_SUCCESS;4063 }4064 rc = VERR_ACCESS_DENIED;4065 }4066 }4067 else4068 {4069 /*4070 * Crosses pages.4071 */4072 size_t cb2 = cb - cb1;4073 uint64_t fFlags1;4074 RTGCPHYS GCPhys1;4075 uint64_t fFlags2;4076 RTGCPHYS GCPhys2;4077 rc = PGMGstGetPage(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);4078 if (RT_SUCCESS(rc))4079 {4080 rc = PGMGstGetPage(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);4081 if (RT_SUCCESS(rc))4082 {4083 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. 
*/4084 && (fFlags2 & X86_PTE_RW))4085 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)4086 && CPUMGetGuestCPL(pVCpu) <= 2) )4087 {4088 void *pvDst;4089 PGMPAGEMAPLOCK Lock;4090 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);4091 switch (rc)4092 {4093 case VINF_SUCCESS:4094 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",4095 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));4096 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);4097 PGMPhysReleasePageMappingLock(pVM, &Lock);4098 break;4099 case VERR_PGM_PHYS_PAGE_RESERVED:4100 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:4101 /* bit bucket */4102 break;4103 default:4104 AssertMsgFailed(("%Rrc\n", rc));4105 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);4106 return rc;4107 }4108 4109 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);4110 switch (rc)4111 {4112 case VINF_SUCCESS:4113 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);4114 PGMPhysReleasePageMappingLock(pVM, &Lock);4115 break;4116 case VERR_PGM_PHYS_PAGE_RESERVED:4117 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:4118 /* bit bucket */4119 break;4120 default:4121 AssertMsgFailed(("%Rrc\n", rc));4122 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);4123 return rc;4124 }4125 4126 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))4127 {4128 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));4129 AssertRC(rc);4130 }4131 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))4132 {4133 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));4134 AssertRC(rc);4135 }4136 return VINF_SUCCESS;4137 }4138 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)4139 GCPtrDst += cb1; /* fault on the 2nd page. */4140 rc = VERR_ACCESS_DENIED;4141 }4142 else4143 GCPtrDst += cb1; /* fault on the 2nd page. 
*/4144 }4145 }4146 4147 /*4148 * Raise a #PF if we're allowed to do that.4149 */4150 /* Calc the error bits. */4151 uint32_t uErr;4152 uint32_t cpl = CPUMGetGuestCPL(pVCpu);4153 switch (rc)4154 {4155 case VINF_SUCCESS:4156 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;4157 rc = VERR_ACCESS_DENIED;4158 break;4159 4160 case VERR_ACCESS_DENIED:4161 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;4162 break;4163 4164 case VERR_PAGE_NOT_PRESENT:4165 case VERR_PAGE_TABLE_NOT_PRESENT:4166 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;4167 break;4168 4169 default:4170 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));4171 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);4172 return rc;4173 }4174 if (fRaiseTrap)4175 {4176 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));4177 rc = TRPMAssertXcptPF(pVCpu, GCPtrDst, uErr);4178 if (RT_SUCCESS(rc))4179 return VINF_EM_RAW_GUEST_TRAP;4180 return rc;4181 }4182 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));4183 return rc;4184 }4185 4186 4187 /**4188 3600 * Return the page type of the specified physical address. 4189 3601 *
Note: See TracChangeset for help on using the changeset viewer.