Changeset 93922 in vbox
- Timestamp: Feb 24, 2022 3:14:31 PM (3 years ago)
- Location: trunk
- Files: 22 edited
trunk/include/VBox/vmm/cpum.h
r93830 → r93922

@@ -1597 +1597 @@
     VMM_INT_DECL(bool)      CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu);
     VMM_INT_DECL(bool)      CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu);
+    VMM_INT_DECL(uint64_t)  CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu);
     /** @} */
@@ -2335 +2336 @@
  * @param   pCtx    Current CPU context.
  */
-DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCCPUMCTX pCtx)
+DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddrEx(PCCPUMCTX pCtx)
 {
     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
trunk/include/VBox/vmm/iem.h
r93650 → r93922

@@ -383 +383 @@
 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedInvept(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo);
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo);
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo);
 # endif
 #endif
trunk/include/VBox/vmm/pgm.h
r93716 → r93922

@@ -508 +508 @@
     RTGCPHYS        GCPhysNested;

-    /** The physical address that is the result of the walk (output).
-     * @remarks This is page aligned and only valid if fSucceeded is set. */
+    /** The physical address that is the result of the walk (output). */
     RTGCPHYS        GCPhys;
@@ -613 +612 @@
 VMMDECL(int)        PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3);
 VMMDECL(int)        PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce);
-VMM_INT_DECL(int)   PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode);
+VMM_INT_DECL(int)   PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce);
 VMMDECL(void)       PGMCr0WpEnabled(PVMCPUCC pVCpu);
 VMMDECL(PGMMODE)    PGMGetGuestMode(PVMCPU pVCpu);
@@ -949 +948 @@
                                             PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr);
 VMMR0_INT_DECL(int) PGMR0PoolGrow(PGVM pGVM, VMCPUID idCpu);
+
+# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+VMMR0DECL(VBOXSTRICTRC) PGMR0NestedTrap0eHandlerNestedPaging(PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
+                                                             PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
+                                                             bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk);
+# endif
 /** @} */
 #endif /* IN_RING0 */
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r93725 → r93922

@@ -3061 +3061 @@
 }

+
+/**
+ * Returns the guest-physical address of the APIC-access page when executing a
+ * nested-guest.
+ *
+ * @returns The APIC-access page guest-physical address.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ */
+VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu)
+{
+    return CPUMGetGuestVmxApicAccessPageAddrEx(&pVCpu->cpum.s.Guest);
+}
+
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r93906 r93922 16366 16366 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 16367 16367 } 16368 16369 16370 /** 16371 * Interface for HM and EM to emulate a VM-exit due to an EPT violation. 16372 * 16373 * @returns Strict VBox status code. 16374 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 16375 * @param pExitInfo Pointer to the VM-exit information. 16376 * @param pExitEventInfo Pointer to the VM-exit event information. 16377 * @thread EMT(pVCpu) 16378 */ 16379 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, 16380 PCVMXVEXITEVENTINFO pExitEventInfo) 16381 { 16382 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI); 16383 16384 iemInitExec(pVCpu, false /*fBypassHandlers*/); 16385 VBOXSTRICTRC rcStrict = iemVmxVmexitEptViolationWithInfo(pVCpu, pExitInfo, pExitEventInfo); 16386 Assert(!pVCpu->iem.s.cActiveMappings); 16387 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 16388 } 16389 16390 16391 /** 16392 * Interface for HM and EM to emulate a VM-exit due to an EPT misconfiguration. 16393 * 16394 * @returns Strict VBox status code. 16395 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 16396 * @param GCPhysAddr The nested-guest physical address causing the EPT 16397 * misconfiguration. 16398 * @param pExitEventInfo Pointer to the VM-exit event information. 16399 * @thread EMT(pVCpu) 16400 */ 16401 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo) 16402 { 16403 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI); 16404 16405 iemInitExec(pVCpu, false /*fBypassHandlers*/); 16406 VBOXSTRICTRC rcStrict = iemVmxVmexitEptMisconfigWithInfo(pVCpu, GCPhysAddr, pExitEventInfo); 16407 Assert(!pVCpu->iem.s.cActiveMappings); 16408 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 16409 } 16410 16368 16411 # endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */ 16369 16412 … … 16384 16427 { 16385 16428 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS)); 16386 Assert(CPUMGetGuestVmxApicAccessPageAddr(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase); 16387 16388 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here? 16389 * Currently they will go through as read accesses. */ 16390 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ; 16429 Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase); 16430 16431 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? 
IEM_ACCESS_DATA_W : IEM_ACCESS_DATA_R; 16391 16432 uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK; 16392 16433 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess); … … 16398 16439 } 16399 16440 16400 Log (("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at%#RGp\n", GCPhysAccessBase));16441 LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase)); 16401 16442 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase); 16402 16443 if (RT_FAILURE(rc)) … … 16407 16448 } 16408 16449 16450 16451 # ifndef IN_RING3 16452 /** 16453 * @callback_method_impl{FNPGMRZPHYSPFHANDLER, 16454 * \#PF access handler callback for guest VMX APIC-access page.} 16455 */ 16456 DECLCALLBACK(VBOXSTRICTRC) iemVmxApicAccessPagePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, 16457 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser) 16458 16459 { 16460 RT_NOREF4(pVM, pRegFrame, pvFault, uUser); 16461 16462 /** @todo We lack information about such as the current instruction length, IDT 16463 * vectoring info etc. These need to be queried from HMR0. */ 16464 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 16465 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu))) 16466 { 16467 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS)); 16468 Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase); 16469 16470 uint32_t fAccess; 16471 if (uErr & X86_TRAP_PF_ID) 16472 fAccess = IEM_ACCESS_INSTRUCTION; 16473 else if (uErr & X86_TRAP_PF_RW) 16474 fAccess = IEM_ACCESS_DATA_W; 16475 else 16476 fAccess = IEM_ACCESS_DATA_R; 16477 16478 uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK; 16479 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, 0 /* cbAccess */, fAccess); 16480 if (fIntercept) 16481 { 16482 /** @todo Once HMR0 interface for querying VMXTRANSIENT info is available, use 16483 * iemVmxVmexitApicAccessWithInfo instead. This is R0-only code anyway. */ 16484 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess); 16485 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 16486 } 16487 16488 /* The access isn't intercepted, which means it needs to be virtualized. */ 16489 return VINF_EM_RAW_EMULATE_INSTR; 16490 } 16491 16492 LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase)); 16493 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase); 16494 if (RT_FAILURE(rc)) 16495 return rc; 16496 16497 return VINF_SUCCESS; 16498 } 16499 # endif /* !IN_RING3 */ 16409 16500 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ 16501 16410 16502 16411 16503 #ifdef IN_RING3 -
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r93847 → r93922

@@ -7560 +7560 @@
      * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
      */
-    rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem);
+    rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
     if (   rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
         && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
@@ -8218 +8218 @@
      * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
      */
-    rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem);
+    rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
     if (   rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
         && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r93650 r93922 3711 3711 * This need not be page aligned (e.g. nested-guest in real 3712 3712 * mode). 3713 * @param pExitEventInfo Pointer to the VM-exit event information. Optional, can 3714 * be NULL. 3715 */ 3716 IEM_STATIC VBOXSTRICTRC iemVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo) 3717 { 3718 if (pExitEventInfo) 3719 { 3720 iemVmxVmcsSetExitIntInfo(pVCpu, pExitEventInfo->uExitIntInfo); 3721 iemVmxVmcsSetExitIntErrCode(pVCpu, pExitEventInfo->uExitIntErrCode); 3722 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo); 3723 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode); 3724 } 3725 3713 */ 3714 IEM_STATIC VBOXSTRICTRC iemVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr) 3715 { 3716 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, GCPhysAddr); 3717 return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_MISCONFIG, 0 /* u64ExitQual */); 3718 } 3719 3720 3721 /** 3722 * VMX VM-exit handler for EPT misconfiguration. 3723 * 3724 * This is intended for EPT misconfigurations where the caller provides all the 3725 * relevant VM-exit information. 3726 * 3727 * @param pVCpu The cross context virtual CPU structure. 3728 * @param GCPhysAddr The physical address causing the EPT misconfiguration. 3729 * This need not be page aligned (e.g. nested-guest in real 3730 * mode). 3731 * @param pExitEventInfo Pointer to the VM-exit event information. 3732 */ 3733 IEM_STATIC VBOXSTRICTRC iemVmxVmexitEptMisconfigWithInfo(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo) 3734 { 3735 Assert(pExitEventInfo); 3736 Assert(!VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo)); 3737 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo); 3738 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode); 3726 3739 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, GCPhysAddr); 3727 3740 return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_MISCONFIG, 0 /* u64ExitQual */); … … 3745 3758 */ 3746 3759 IEM_STATIC VBOXSTRICTRC iemVmxVmexitEptViolation(PVMCPUCC pVCpu, uint32_t fAccess, uint32_t fSlatFail, uint64_t fEptAccess, 3747 RTGCPHYS GCPhysAddr, bool fLinearAddrValid, uint64_t GCPtrAddr, uint8_t cbInstr) 3760 RTGCPHYS GCPhysAddr, bool fIsLinearAddrValid, uint64_t GCPtrAddr, 3761 uint8_t cbInstr) 3748 3762 { 3749 3763 /* … … 3752 3766 * While we can leave it this way, it's preferrable to zero it for consistency. 
3753 3767 */ 3754 Assert(f LinearAddrValid || GCPtrAddr == 0);3768 Assert(fIsLinearAddrValid || GCPtrAddr == 0); 3755 3769 3756 3770 uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps; 3757 uint8_t const fSupportsAccessDirty = fCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY; 3758 3759 uint8_t const fDataRead = ((fAccess & IEM_ACCESS_DATA_R) == IEM_ACCESS_DATA_R) | fSupportsAccessDirty; 3760 uint8_t const fDataWrite = ((fAccess & IEM_ACCESS_DATA_RW) == IEM_ACCESS_DATA_RW) | fSupportsAccessDirty; 3761 uint8_t const fInstrFetch = (fAccess & IEM_ACCESS_INSTRUCTION) == IEM_ACCESS_INSTRUCTION; 3762 bool const fEptRead = RT_BOOL(fEptAccess & EPT_E_READ); 3763 bool const fEptWrite = RT_BOOL(fEptAccess & EPT_E_WRITE); 3764 bool const fEptExec = RT_BOOL(fEptAccess & EPT_E_EXECUTE); 3765 bool const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret; 3766 bool const fLinearToPhysAddr = fLinearAddrValid & RT_BOOL(fSlatFail & IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR); 3771 bool const fSupportsAccessDirty = RT_BOOL(fCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY); 3772 3773 uint32_t const fDataRdMask = IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_READ; 3774 uint32_t const fDataWrMask = IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE; 3775 uint32_t const fInstrMask = IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_EXEC; 3776 bool const fDataRead = ((fAccess & fDataRdMask) == IEM_ACCESS_DATA_R) | fSupportsAccessDirty; 3777 bool const fDataWrite = ((fAccess & fDataWrMask) == IEM_ACCESS_DATA_W) | fSupportsAccessDirty; 3778 bool const fInstrFetch = ((fAccess & fInstrMask) == IEM_ACCESS_INSTRUCTION); 3779 bool const fEptRead = RT_BOOL(fEptAccess & EPT_E_READ); 3780 bool const fEptWrite = RT_BOOL(fEptAccess & EPT_E_WRITE); 3781 bool const fEptExec = RT_BOOL(fEptAccess & EPT_E_EXECUTE); 3782 bool const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret; 3783 bool const fIsLinearToPhysAddr = fIsLinearAddrValid & RT_BOOL(fSlatFail & IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR); 3767 3784 3768 3785 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ACCESS_READ, fDataRead) … … 3772 3789 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ENTRY_WRITE, fEptWrite) 3773 3790 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ENTRY_EXECUTE, fEptExec) 3774 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_ADDR_VALID, f LinearAddrValid)3775 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_TO_PHYS_ADDR, f LinearToPhysAddr)3791 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_ADDR_VALID, fIsLinearAddrValid) 3792 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_TO_PHYS_ADDR, fIsLinearToPhysAddr) 3776 3793 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_NMI_UNBLOCK_IRET, fNmiUnblocking); 3777 3794 … … 3807 3824 PCVMXVEXITEVENTINFO pExitEventInfo) 3808 3825 { 3826 Assert(pExitInfo); 3827 Assert(pExitEventInfo); 3809 3828 Assert(pExitInfo->uReason == VMX_EXIT_EPT_VIOLATION); 3810 3811 iemVmxVmcsSetExitIntInfo(pVCpu, pExitEventInfo->uExitIntInfo); 3812 iemVmxVmcsSetExitIntErrCode(pVCpu, pExitEventInfo->uExitIntErrCode); 3829 Assert(!VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo)); 3830 3813 3831 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo); 3814 3832 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode); … … 3818 3836 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr); 3819 3837 else 3820 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, 3838 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, 0); 3821 3839 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr); 3822 3840 return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_VIOLATION, 
pExitInfo->u64Qual); … … 3834 3852 * applicable. 3835 3853 */ 3836 IEM_STATIC VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, 3837 uint8_t cbInstr) 3854 IEM_STATIC VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) 3838 3855 { 3839 3856 Assert(pWalk->fIsSlat); … … 3852 3869 Log(("EptMisconfig: cs:rip=%x:%#RX64 fAccess=%#RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fAccess)); 3853 3870 Assert(pWalk->fFailed & PGM_WALKFAIL_EPT_MISCONFIG); 3854 return iemVmxVmexitEptMisconfig(pVCpu, pWalk->GCPhysNested , NULL /* pExitEventInfo */);3871 return iemVmxVmexitEptMisconfig(pVCpu, pWalk->GCPhysNested); 3855 3872 } 3856 3873 … … 3861 3878 * @param pVCpu The cross context virtual CPU structure. 3862 3879 * @param offAccess The offset of the register being accessed. 3863 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or 3864 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION). 3880 * @param fAccess The type of access, see IEM_ACCESS_XXX. 3865 3881 */ 3866 3882 IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPUCC pVCpu, uint16_t offAccess, uint32_t fAccess) 3867 3883 { 3868 Assert((fAccess & IEM_ACCESS_TYPE_READ) || (fAccess & IEM_ACCESS_TYPE_WRITE) || (fAccess & IEM_ACCESS_INSTRUCTION));3869 3870 3884 VMXAPICACCESS enmAccess; 3871 3885 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL); 3872 3886 if (fInEventDelivery) 3873 3887 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY; 3874 else if ( fAccess &IEM_ACCESS_INSTRUCTION)3888 else if ((fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == IEM_ACCESS_INSTRUCTION) 3875 3889 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH; 3876 3890 else if (fAccess & IEM_ACCESS_TYPE_WRITE) … … 4127 4141 * @param offAccess The offset of the register being accessed. 4128 4142 * @param cbAccess The size of the access in bytes. 4129 * @param fAccess The type of access (must be IEM_ACCESS_TYPE_READ or 4130 * IEM_ACCESS_TYPE_WRITE). 4143 * @param fAccess The type of access, see IEM_ACCESS_XXX. 4131 4144 * 4132 4145 * @remarks This must not be used for MSR-based APIC-access page accesses! … … 4136 4149 { 4137 4150 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 4138 Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);4139 4151 4140 4152 /* … … 4297 4309 * 4298 4310 * @param pVCpu The cross context virtual CPU structure. 4299 * @param pGCPhysAccess Pointer to the guest-physical address used. 4300 */ 4301 IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess) 4311 * @param pGCPhysAccess Pointer to the guest-physical address accessed. 4312 * @param fAccess The type of access, see IEM_ACCESS_XXX. 4313 */ 4314 IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, uint32_t fAccess) 4302 4315 { 4303 4316 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS); … … 4311 4324 { 4312 4325 uint16_t const offAccess = *pGCPhysAccess & GUEST_PAGE_OFFSET_MASK; 4313 uint32_t const fAccess = IEM_ACCESS_TYPE_READ;4314 4326 uint16_t const cbAccess = 1; 4315 4327 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess); … … 4338 4350 * @param pvData Pointer to the data being written or where to store the data 4339 4351 * being read. 
4340 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or 4341 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION). 4352 * @param fAccess The type of access, see IEM_ACCESS_XXX. 4342 4353 */ 4343 4354 IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, … … 4346 4357 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS); 4347 4358 Assert(pvData); 4348 Assert( (fAccess & IEM_ACCESS_TYPE_READ)4349 || (fAccess & IEM_ACCESS_TYPE_WRITE)4350 || (fAccess & IEM_ACCESS_INSTRUCTION));4351 4359 4352 4360 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess); … … 4389 4397 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page". 4390 4398 */ 4399 Assert(fAccess & IEM_ACCESS_TYPE_READ); 4400 4391 4401 Assert(cbAccess <= 4); 4392 4402 Assert(offAccess < XAPIC_OFF_END + 4); -
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r93905 r93922 736 736 # define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL } 737 737 # define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \ 738 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }738 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) } 739 739 740 740 #elif !defined(IN_RING3) && defined(VBOX_STRICT) 741 741 # define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL } 742 742 # define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \ 743 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm( AssertCR3) }743 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) } 744 744 745 745 #elif defined(IN_RING3) && !defined(VBOX_STRICT) … … 1907 1907 case PGMSLAT_EPT: 1908 1908 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT; 1909 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearaddrValid */, NIL_RTGCPTR, pWalk,1910 &pGstWalk->u.Ept);1909 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearaddrValid */, 0 /* GCPtrNested */, 1910 pWalk, &pGstWalk->u.Ept); 1911 1911 1912 1912 default: … … 2991 2991 /* Flush the TLB */ 2992 2992 PGM_INVL_VCPU_TLBS(pVCpu); 2993 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode );2993 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode, fForce); 2994 2994 } 2995 2995 … … 3226 3226 * @param enmGuestMode The new guest mode. This is assumed to be different from 3227 3227 * the current mode. 3228 */ 3229 VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode) 3228 * @param fForce Whether to force a shadow paging mode change. 3229 */ 3230 VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce) 3230 3231 { 3231 3232 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode))); … … 3235 3236 * Calc the shadow mode and switcher. 3236 3237 */ 3237 PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode); 3238 PGMMODE const enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode); 3239 bool const fShadowModeChanged = enmShadowMode != pVCpu->pgm.s.enmShadowMode || fForce; 3238 3240 3239 3241 /* … … 3241 3243 */ 3242 3244 /* shadow */ 3243 if ( enmShadowMode != pVCpu->pgm.s.enmShadowMode)3244 { 3245 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", 3245 if (fShadowModeChanged) 3246 { 3247 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode))); 3246 3248 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData; 3247 3249 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData) … … 3310 3312 * Enter new shadow mode (if changed). 
3311 3313 */ 3312 if ( enmShadowMode != pVCpu->pgm.s.enmShadowMode)3314 if (fShadowModeChanged) 3313 3315 { 3314 3316 pVCpu->pgm.s.enmShadowMode = enmShadowMode; … … 3580 3582 } 3581 3583 } 3582 #endif 3584 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */ 3583 3585 3584 3586 -
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r93905 r93922 37 37 #ifndef IN_RING3 38 38 PGM_BTH_DECL(int, Trap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken); 39 PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested, 40 bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk, bool *pfLockTaken); 39 41 #endif 40 42 PGM_BTH_DECL(int, InvalidatePage)(PVMCPUCC pVCpu, RTGCPTR GCPtrPage); … … 966 968 # else /* Nested paging, EPT except PGM_GST_TYPE = PROT, NONE. */ 967 969 NOREF(uErr); NOREF(pRegFrame); NOREF(pvFault); 970 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE)); 971 return VERR_PGM_NOT_USED_IN_MODE; 972 # endif 973 } 974 975 976 /** 977 * Nested \#PF handler for nested-guest hardware-assisted execution using nested 978 * paging. 979 * 980 * @returns VBox status code (appropriate for trap handling and GC return). 981 * @param pVCpu The cross context virtual CPU structure. 982 * @param uErr The fault error (X86_TRAP_PF_*). 983 * @param pRegFrame The register frame. 984 * @param GCPhysNested The nested-guest physical address being accessed. 985 * @param fIsLinearAddrValid Whether translation of a nested-guest linear address 986 * caused this fault. If @c false, GCPtrNested must be 987 * 0. 988 * @param GCPtrNested The nested-guest linear address that caused this 989 * fault. 990 * @param pWalk The guest page table walk result. 991 * @param pfLockTaken Where to store whether the PGM lock is still held 992 * when this function completes. 993 */ 994 PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested, 995 bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk, bool *pfLockTaken) 996 { 997 *pfLockTaken = false; 998 # if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) \ 999 && ( PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_32BIT \ 1000 || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \ 1001 && PGM_SHW_TYPE == PGM_TYPE_EPT 1002 1003 Assert(CPUMIsGuestVmxEptPagingEnabled(pVCpu)); 1004 1005 /* 1006 * Walk the guest EPT tables and check if it's an EPT violation or misconfiguration. 1007 */ 1008 PGMPTWALKGST GstWalkAll; 1009 int rc = pgmGstSlatWalk(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &GstWalkAll); 1010 if (RT_FAILURE(rc)) 1011 return rc; 1012 1013 Assert(GstWalkAll.enmType == PGMPTWALKGSTTYPE_EPT); 1014 Assert(pWalk->fSucceeded); 1015 Assert(pWalk->fEffective & PGM_PTATTRS_R_MASK); 1016 Assert(pWalk->fIsSlat); 1017 1018 if (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID)) 1019 { 1020 if ( ( (uErr & X86_TRAP_PF_RW) 1021 && !(pWalk->fEffective & PGM_PTATTRS_W_MASK) 1022 && ( (uErr & X86_TRAP_PF_US) 1023 || CPUMIsGuestR0WriteProtEnabled(pVCpu)) ) 1024 || ((uErr & X86_TRAP_PF_US) && !(pWalk->fEffective & PGM_PTATTRS_US_MASK)) 1025 || ((uErr & X86_TRAP_PF_ID) && (pWalk->fEffective & PGM_PTATTRS_NX_MASK)) 1026 ) 1027 return VERR_ACCESS_DENIED; 1028 } 1029 1030 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 1031 RTGCPHYS const GCPhysFault = PGM_A20_APPLY(pVCpu, GCPhysNested & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK); 1032 GSTPDE const PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A }; 1033 1034 /* Take the big lock now. */ 1035 *pfLockTaken = true; 1036 PGM_LOCK_VOID(pVM); 1037 1038 /* 1039 * Check if this is an APIC-access page access (VMX specific). 
1040 */ 1041 RTGCPHYS const GCPhysApicAccess = CPUMGetGuestVmxApicAccessPageAddr(pVCpu); 1042 if ((pWalk->GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) == GCPhysApicAccess) 1043 { 1044 PPGMPAGE pPage; 1045 rc = pgmPhysGetPageEx(pVM, PGM_A20_APPLY(pVCpu, GCPhysApicAccess), &pPage); 1046 if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 1047 { 1048 rc = VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pWalk->GCPhys, pPage, 1049 pfLockTaken)); 1050 return rc; 1051 } 1052 } 1053 1054 # ifdef PGM_WITH_MMIO_OPTIMIZATIONS 1055 /* 1056 * Check if this is an MMIO access. 1057 */ 1058 if (uErr & X86_TRAP_PF_RSVD) 1059 { 1060 PPGMPAGE pPage; 1061 rc = pgmPhysGetPageEx(pVM, PGM_A20_APPLY(pVCpu, (RTGCPHYS)GCPhysFault), &pPage); 1062 if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 1063 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, GCPhysFault, pPage, 1064 pfLockTaken)); 1065 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, GCPhysFault, 1, uErr); 1066 AssertRC(rc); 1067 HMInvalidatePhysPage(pVM, GCPhysFault); 1068 return rc; /* Restart with the corrected entry. */ 1069 } 1070 # endif /* PGM_WITH_MMIO_OPTIMIZATIONS */ 1071 1072 /* 1073 * Fetch the guest EPT page directory pointer. 1074 */ 1075 const unsigned iPDDst = ((GCPhysFault >> SHW_PD_SHIFT) & SHW_PD_MASK); 1076 PEPTPD pPDDst; 1077 rc = pgmShwGetEPTPDPtr(pVCpu, GCPhysFault, NULL /* ppPdpt */, &pPDDst); 1078 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS); 1079 Assert(pPDDst); 1080 1081 /* 1082 * A common case is the not-present error caused by lazy page table syncing. 1083 * 1084 * It is IMPORTANT that we weed out any access to non-present shadow PDEs 1085 * here so we can safely assume that the shadow PT is present when calling 1086 * SyncPage later. 1087 * 1088 * On failure, we ASSUME that SyncPT is out of memory or detected some kind 1089 * of mapping conflict and defer to SyncCR3 in R3. 1090 * (Again, we do NOT support access handlers for non-present guest pages.) 1091 * 1092 */ 1093 if ( !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */ 1094 && !SHW_PDE_IS_P(pPDDst->a[iPDDst])) 1095 { 1096 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2SyncPT; }); 1097 LogFlow(("=>SyncPT GCPhysFault=%RGp\n", GCPhysFault)); 1098 rc = PGM_BTH_NAME(SyncPT)(pVCpu, 0 /* iPDSrc */, NULL /* pPDSrc */, GCPhysFault); 1099 if (RT_SUCCESS(rc)) 1100 return rc; 1101 Log(("SyncPT: %RGp failed!! rc=%Rrc\n", GCPhysFault, rc)); 1102 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */ 1103 return VINF_PGM_SYNC_CR3; 1104 } 1105 1106 /* 1107 * Check if this fault address is flagged for special treatment, 1108 * which means we'll have to figure out the physical address and 1109 * check flags associated with it. 1110 * 1111 * ASSUME that we can limit any special access handling to pages 1112 * in page tables which the guest believes to be present. 1113 */ 1114 PPGMPAGE pPage; 1115 rc = pgmPhysGetPageEx(pVM, GCPhysFault, &pPage); 1116 if (RT_FAILURE(rc)) 1117 { 1118 /* 1119 * When the guest accesses invalid physical memory (e.g. probing 1120 * of RAM or accessing a remapped MMIO range), then we'll fall 1121 * back to the recompiler to emulate the instruction. 
1122 */ 1123 LogFlow(("PGM #PF: pgmPhysGetPageEx(%RGp) failed with %Rrc\n", GCPhysFault, rc)); 1124 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eHandlersInvalid); 1125 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2InvalidPhys; }); 1126 return VINF_EM_RAW_EMULATE_INSTR; 1127 } 1128 1129 /* 1130 * Any handlers for this page? 1131 */ 1132 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 1133 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, GCPhysFault, pPage, 1134 pfLockTaken)); 1135 1136 /* 1137 * We are here only if page is present in Guest page tables and 1138 * trap is not handled by our handlers. 1139 * 1140 * Check it for page out-of-sync situation. 1141 */ 1142 if (!(uErr & X86_TRAP_PF_P)) 1143 { 1144 /* 1145 * Page is not present in our page tables. Try to sync it! 1146 */ 1147 if (uErr & X86_TRAP_PF_US) 1148 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncUser)); 1149 else /* supervisor */ 1150 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncSupervisor)); 1151 1152 if (PGM_PAGE_IS_BALLOONED(pPage)) 1153 { 1154 /* Emulate reads from ballooned pages as they are not present in 1155 our shadow page tables. (Required for e.g. Solaris guests; soft 1156 ecc, random nr generator.) */ 1157 rc = VBOXSTRICTRC_TODO(PGMInterpretInstruction(pVM, pVCpu, pRegFrame, GCPhysFault)); 1158 LogFlow(("PGM: PGMInterpretInstruction balloon -> rc=%d pPage=%R[pgmpage]\n", rc, pPage)); 1159 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncBallloon)); 1160 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Ballooned; }); 1161 return rc; 1162 } 1163 1164 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, GCPhysFault, PGM_SYNC_NR_PAGES, uErr); 1165 if (RT_SUCCESS(rc)) 1166 { 1167 /* The page was successfully synced, return to the guest. */ 1168 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2OutOfSync; }); 1169 return VINF_SUCCESS; 1170 } 1171 } 1172 else 1173 { 1174 /* 1175 * Write protected pages are made writable when the guest makes the 1176 * first write to it. This happens for pages that are shared, write 1177 * monitored or not yet allocated. 1178 * 1179 * We may also end up here when CR0.WP=0 in the guest. 1180 * 1181 * Also, a side effect of not flushing global PDEs are out of sync 1182 * pages due to physical monitored regions, that are no longer valid. 1183 * Assume for now it only applies to the read/write flag. 1184 */ 1185 if (uErr & X86_TRAP_PF_RW) 1186 { 1187 /* 1188 * Check if it is a read-only page. 1189 */ 1190 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED) 1191 { 1192 Assert(!PGM_PAGE_IS_ZERO(pPage)); 1193 AssertFatalMsg(!PGM_PAGE_IS_BALLOONED(pPage), ("Unexpected ballooned page at %RGp\n", GCPhysFault)); 1194 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2MakeWritable; }); 1195 1196 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhysFault); 1197 if (rc != VINF_SUCCESS) 1198 { 1199 AssertMsg(rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("%Rrc\n", rc)); 1200 return rc; 1201 } 1202 if (RT_UNLIKELY(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))) 1203 return VINF_EM_NO_MEMORY; 1204 } 1205 1206 if (uErr & X86_TRAP_PF_US) 1207 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncUserWrite)); 1208 else 1209 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncSupervisorWrite)); 1210 1211 /* 1212 * Sync the page. 
1213 * 1214 * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the 1215 * page is not present, which is not true in this case. 1216 */ 1217 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, GCPhysFault, 1, uErr); 1218 if (RT_SUCCESS(rc)) 1219 { 1220 /* 1221 * Page was successfully synced, return to guest but invalidate 1222 * the TLB first as the page is very likely to be in it. 1223 */ 1224 HMInvalidatePhysPage(pVM, GCPhysFault); 1225 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2OutOfSyncHndObs; }); 1226 return VINF_SUCCESS; 1227 } 1228 } 1229 } 1230 1231 /* 1232 * If we get here it is because something failed above, i.e. most like guru meditation time. 1233 */ 1234 LogRelFunc(("returns rc=%Rrc GCPhysFault=%RGp uErr=%RX64 cs:rip=%04x:%08RX64\n", rc, GCPhysFault, (uint64_t)uErr, 1235 pRegFrame->cs.Sel, pRegFrame->rip)); 1236 return rc; 1237 1238 # else 1239 RT_NOREF7(pVCpu, uErr, pRegFrame, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk); 968 1240 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE)); 969 1241 return VERR_PGM_NOT_USED_IN_MODE; -
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r93572 → r93922

@@ -376 +376 @@
         pWalk->fSucceeded = true;
         pWalk->GCPtr      = GCPtr;
-        pWalk->GCPhys     = SlatWalk.GCPhys & PAGE_BASE_GC_MASK;
+        pWalk->GCPhys     = SlatWalk.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
         pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
     }
trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h
r93554 → r93922

@@ -21 +21 @@
     if (!(uEntry & EPT_E_READ))
     {
-        if (uEntry & EPT_E_WRITE)
+        Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
+        Assert(!RT_BF_GET(pVCpu->pgm.s.uEptVpidCapMsr, VMX_BF_EPT_VPID_CAP_EXEC_ONLY));
+        NOREF(pVCpu);
+        if (uEntry & (EPT_E_WRITE | EPT_E_EXECUTE))
             return false;
-        Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
-        if (   !RT_BF_GET(pVCpu->pgm.s.uEptVpidCapMsr, VMX_BF_EPT_VPID_CAP_EXEC_ONLY)
-            && (uEntry & EPT_E_EXECUTE))
-            return false;
     }
     return true;
@@ -35 +34 @@
 {
     Assert(uLevel <= 3 && uLevel >= 1); NOREF(uLevel);
-    uint64_t const fEptMemTypeMask = uEntry & VMX_BF_EPT_PT_MEMTYPE_MASK;
-    if (   fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_2
-        || fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_3
-        || fEptMemTypeMask == EPT_E_MEMTYPE_INVALID_7)
-        return false;
-    return true;
+    uint8_t const fEptMemTypeMask = uEntry & VMX_BF_EPT_PT_MEMTYPE_MASK;
+    switch (fEptMemTypeMask)
+    {
+        case EPT_E_MEMTYPE_WB:
+        case EPT_E_MEMTYPE_UC:
+        case EPT_E_MEMTYPE_WP:
+        case EPT_E_MEMTYPE_WT:
+        case EPT_E_MEMTYPE_WC:
+            return true;
+    }
+    return false;
 }

@@ -88 +92 @@
  * @param   GCPhysNested        The nested-guest physical address to walk.
  * @param   fIsLinearAddrValid  Whether the linear-address in @c GCPtrNested caused
- *                              this page walk. If this is false, @c GCPtrNested
- *                              must be 0.
+ *                              this page walk.
  * @param   GCPtrNested         The nested-guest linear address that caused this
- *                              page walk.
+ *                              page walk. If @c fIsLinearAddrValid is false, pass
+ *                              0.
  * @param   pWalk               The page walk info.
  * @param   pGstWalk            The guest mode specific page walk info.
trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
r93831 r93922 311 311 static FNVMXEXITHANDLER vmxHCExitInstrNested; 312 312 static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested; 313 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 314 static FNVMXEXITHANDLER vmxHCExitEptViolationNested; 315 static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested; 316 # endif 313 317 /** @} */ 314 318 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ … … 5411 5415 switch (uExitReason) 5412 5416 { 5413 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig (pVCpu, pVmxTransient);5414 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation (pVCpu, pVmxTransient);5417 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient); 5418 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient); 5415 5419 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient); 5416 5420 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient); … … 10211 10215 } 10212 10216 10217 10218 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 10219 /** 10220 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). 10221 * Conditional VM-exit. 10222 */ 10223 HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient) 10224 { 10225 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 10226 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging); 10227 10228 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 10229 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 10230 AssertRCReturn(rc, rc); 10231 10232 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient); 10233 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient); 10234 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient); 10235 10236 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr; 10237 uint64_t const uExitQual = pVmxTransient->uExitQual; 10238 10239 RTGCPTR GCPtrNested; 10240 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID); 10241 if (fIsLinearAddrValid) 10242 { 10243 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient); 10244 GCPtrNested = pVmxTransient->uGuestLinearAddr; 10245 } 10246 else 10247 GCPtrNested = 0; 10248 10249 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0) 10250 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0) 10251 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ 10252 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE 10253 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? 
X86_TRAP_PF_P : 0); 10254 10255 PGMPTWALK Walk; 10256 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 10257 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx), GCPhysNested, 10258 fIsLinearAddrValid, GCPtrNested, &Walk); 10259 if (RT_SUCCESS(rcStrict)) 10260 { 10261 if (rcStrict == VINF_SUCCESS) 10262 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 10263 else if (rcStrict == VINF_IEM_RAISED_XCPT) 10264 { 10265 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); 10266 rcStrict = VINF_SUCCESS; 10267 } 10268 return rcStrict; 10269 } 10270 10271 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient); 10272 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient); 10273 10274 VMXVEXITEVENTINFO ExitEventInfo; 10275 RT_ZERO(ExitEventInfo); 10276 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo; 10277 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode; 10278 10279 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION) 10280 { 10281 VMXVEXITINFO ExitInfo; 10282 RT_ZERO(ExitInfo); 10283 ExitInfo.uReason = VMX_EXIT_EPT_VIOLATION; 10284 ExitInfo.cbInstr = pVmxTransient->cbExitInstr; 10285 ExitInfo.u64Qual = pVmxTransient->uExitQual; 10286 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr; 10287 ExitInfo.u64GuestPhysAddr = pVmxTransient->uGuestPhysicalAddr; 10288 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo); 10289 } 10290 10291 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG); 10292 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo); 10293 } 10294 10295 10296 /** 10297 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG). 10298 * Conditional VM-exit. 10299 */ 10300 HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient) 10301 { 10302 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient); 10303 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging); 10304 10305 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 10306 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK); 10307 AssertRCReturn(rc, rc); 10308 10309 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient); 10310 10311 PGMPTWALK Walk; 10312 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 10313 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr; 10314 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx), 10315 GCPhysNested, false /* fIsLinearAddrValid */, 10316 0 /* GCPtrNested*/, &Walk); 10317 if (RT_SUCCESS(rcStrict)) 10318 return VINF_EM_RAW_EMULATE_INSTR; 10319 10320 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient); 10321 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient); 10322 10323 VMXVEXITEVENTINFO ExitEventInfo; 10324 RT_ZERO(ExitEventInfo); 10325 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo; 10326 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode; 10327 10328 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo); 10329 } 10330 # endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */ 10331 10213 10332 /** @} */ 10214 10333 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r93748 r93922 43 43 #include "dtrace/VBoxVMM.h" 44 44 45 /********************************************************************************************************************************* 46 * Defined Constants And Macros * 47 *********************************************************************************************************************************/ 45 48 #ifdef DEBUG_ramshankar 46 49 # define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS … … 58 61 59 62 /********************************************************************************************************************************* 60 * Defined Constants And Macros *61 *********************************************************************************************************************************/62 63 64 /*********************************************************************************************************************************65 63 * Structures and Typedefs * 66 64 *********************************************************************************************************************************/ 67 68 65 /** 69 66 * VMX page allocation information. … … 86 83 * Internal Functions * 87 84 *********************************************************************************************************************************/ 88 89 90 /********************************************************************************************************************************* 91 * Global Variables * 92 *********************************************************************************************************************************/ 93 static bool hmR0VmxShouldSwapEferMsr(PCVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient); 94 static int hmR0VmxExitHostNmi(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo); 85 static bool hmR0VmxShouldSwapEferMsr(PCVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient); 86 static int hmR0VmxExitHostNmi(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo); 95 87 96 88 … … 3520 3512 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_CS_SEL, pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS); AssertRC(rc); 3521 3513 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_SS_SEL, pVCpu->hmr0.s.vmx.RestoreHost.uHostSelSS); AssertRC(rc); 3522 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_DS_SEL, uSelDS); AssertRC(rc);3523 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_ES_SEL, uSelES); AssertRC(rc);3524 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_FS_SEL, uSelFS); AssertRC(rc);3525 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_GS_SEL, uSelGS); AssertRC(rc);3514 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_DS_SEL, uSelDS); AssertRC(rc); 3515 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_ES_SEL, uSelES); AssertRC(rc); 3516 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_FS_SEL, uSelFS); AssertRC(rc); 3517 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_GS_SEL, uSelGS); AssertRC(rc); 3526 3518 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_TR_SEL, pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR); AssertRC(rc); 3527 3519 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GDTR_BASE, pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.uAddr); AssertRC(rc); 3528 3520 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_IDTR_BASE, pVCpu->hmr0.s.vmx.RestoreHost.HostIdtr.uAddr); AssertRC(rc); 3529 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_TR_BASE, uTRBase); AssertRC(rc);3521 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_TR_BASE, uTRBase); AssertRC(rc); 3530 3522 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_FS_BASE, pVCpu->hmr0.s.vmx.RestoreHost.uHostFSBase); AssertRC(rc); 3531 3523 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GS_BASE, pVCpu->hmr0.s.vmx.RestoreHost.uHostGSBase); AssertRC(rc); … … 5589 5581 * 5590 5582 * @returns VBox status code. 5591 * @param pVCpu The cross context virtual CPU structure. 
5592 */ 5593 static int hmR0VmxMapHCApicAccessPage(PVMCPUCC pVCpu) 5583 * @param pVCpu The cross context virtual CPU structure. 5584 * @param u64MsrApicBase The guest-physical address of the APIC access page. 5585 */ 5586 static int hmR0VmxMapHCApicAccessPage(PVMCPUCC pVCpu, RTGCPHYS GCPhysApicBase) 5594 5587 { 5595 5588 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 5596 uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu); 5597 5598 Assert(PDMHasApic(pVM)); 5599 Assert(u64MsrApicBase); 5600 5601 RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK; 5602 Log4Func(("Mappping HC APIC-access page at %#RGp\n", GCPhysApicBase)); 5589 Assert(GCPhysApicBase); 5590 5591 LogFunc(("Mappping HC APIC-access page at %#RGp\n", GCPhysApicBase)); 5603 5592 5604 5593 /* Unalias the existing mapping. */ … … 5611 5600 AssertRCReturn(rc, rc); 5612 5601 5613 /* Update the per-VCPU cache of the APIC base MSR. */5614 pVCpu->hm.s.vmx.u64GstMsrApicBase = u64MsrApicBase;5615 5602 return VINF_SUCCESS; 5616 5603 } … … 6113 6100 && PDMHasApic(pVM)) 6114 6101 { 6115 int rc = hmR0VmxMapHCApicAccessPage(pVCpu); 6102 /* Get the APIC base MSR from the virtual APIC device. */ 6103 uint64_t const uApicBaseMsr = APICGetBaseMsrNoCheck(pVCpu); 6104 6105 /* Map the APIC access page. */ 6106 int rc = hmR0VmxMapHCApicAccessPage(pVCpu, uApicBaseMsr & PAGE_BASE_GC_MASK); 6116 6107 AssertRCReturn(rc, rc); 6108 6109 /* Update the per-VCPU cache of the APIC base MSR corresponding to the mapped APIC access page. */ 6110 pVCpu->hm.s.vmx.u64GstMsrApicBase = uApicBaseMsr; 6117 6111 } 6118 6112 … … 6812 6806 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient); 6813 6807 return rcRun; 6808 } 6809 6810 /* 6811 * Undo temporary disabling of the APIC-access page monitoring we did in hmR0VmxMergeVmcsNested. 6812 * This is needed for NestedTrap0eHandler (and IEM) to cause nested-guest APIC-access VM-exits. 6813 */ 6814 if (VmxTransient.pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS) 6815 { 6816 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 6817 RTGCPHYS const GCPhysApicAccess = pVmcsNstGst->u64AddrApicAccess.u; 6818 PGMHandlerPhysicalReset(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess); 6814 6819 } 6815 6820 … … 7239 7244 return rcStrict; 7240 7245 } 7246 -
trunk/src/VBox/VMM/VMMR0/IEMR0.cpp
r93655 → r93922

@@ -44 +44 @@
     {
         int rc = PGMR0HandlerPhysicalTypeSetUpContext(pGVM, PGMPHYSHANDLERKIND_ALL, 0 /*fFlags*/,
-                                                      iemVmxApicAccessPageHandler, NULL /*pfnzPfHandlerR0*/,
+                                                      iemVmxApicAccessPageHandler, iemVmxApicAccessPagePfHandler,
                                                       "VMX APIC-access page", pGVM->iem.s.hVmxApicAccessPage);
         AssertLogRelRCReturn(rc, rc);
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
r93735 → r93922

@@ -1213 +1213 @@
     return rc;
 }
+
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/**
+ * Nested \#PF Handler for nested-guest execution using nested paging.
+ *
+ * @returns Strict VBox status code (appropriate for trap handling and GC return).
+ * @param   pGVM                The global (ring-0) VM structure.
+ * @param   pGVCpu              The global (ring-0) CPU structure of the calling
+ *                              EMT.
+ * @param   uErr                The trap error code.
+ */
+VMMR0DECL(VBOXSTRICTRC) PGMR0NestedTrap0eHandlerNestedPaging(PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
+                                                             PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
+                                                             bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk)
+{
+    Assert(enmShwPagingMode == PGMMODE_EPT);
+    NOREF(enmShwPagingMode);
+
+    bool fLockTaken;
+    VBOXSTRICTRC rcStrict = PGM_BTH_NAME_EPT_PROT(NestedTrap0eHandler)(pGVCpu, uErr, pRegFrame, GCPhysNested, fIsLinearAddrValid,
+                                                                       GCPtrNested, pWalk, &fLockTaken);
+    if (fLockTaken)
+    {
+        PGM_LOCK_ASSERT_OWNER(pGVCpu->CTX_SUFF(pVM));
+        PGM_UNLOCK(pGVCpu->CTX_SUFF(pVM));
+    }
+    if (rcStrict == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
+        rcStrict = VINF_SUCCESS;
+    return rcStrict;
+}
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
trunk/src/VBox/VMM/VMMR0/PGMR0Bth.h
r93115 → r93922

@@ -22 +22 @@
 RT_C_DECLS_BEGIN
 PGM_BTH_DECL(int, Trap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
+PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
+                                       bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk, bool *pfLockTaken);
 RT_C_DECLS_END
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r93905 → r93922

@@ -4264 +4264 @@
      * disabled will automatically prevent exposing features that rely on
      */
-    rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxEpt", &pVM->cpum.s.fNestedVmxEpt, false);
+    rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxEpt", &pVM->cpum.s.fNestedVmxEpt, true);
     AssertLogRelRCReturn(rc, rc);
@@ -4272 +4272 @@
      * it.
      */
-    rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxUnrestrictedGuest", &pVM->cpum.s.fNestedVmxUnrestrictedGuest, false);
+    rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedVmxUnrestrictedGuest", &pVM->cpum.s.fNestedVmxUnrestrictedGuest, true);
     AssertLogRelRCReturn(rc, rc);
trunk/src/VBox/VMM/VMMR3/HM.cpp
r93905 → r93922

@@ -1080 +1080 @@
         {
             PVMCPU pVCpu = pVM->apCpusR3[idCpu];
-            PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
+            PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
         }
     }
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r93905 → r93922

@@ -1024 +1024 @@
     {
         PVMCPU pVCpu = pVM->apCpusR3[i];
-        rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
+        rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
         if (RT_FAILURE(rc))
             break;
@@ -1647 +1647 @@
         pVM->pgm.s.HCPhysInvMmioPg |= UINT64_C(0x000f0000000000);
     }
-    Assert(pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth == cMaxPhysAddrWidth);
+    /* Disabled the below assertion -- triggers 24 vs 39 on my Intel Skylake box for a 32-bit (Guest-type Other/Unknown) VM. */
+    //AssertMsg(pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth == cMaxPhysAddrWidth,
+    //          ("CPUM %u - PGM %u\n", pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth, cMaxPhysAddrWidth));
 #else
     uint32_t const cMaxPhysAddrWidth = pVM->cpum.ro.GuestFeatures.cMaxPhysAddrWidth;
@@ -1852 +1854 @@
     pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;

-    int rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
+    int rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
     AssertReleaseRC(rc);
@@ -1918 +1920 @@
         PVMCPU pVCpu = pVM->apCpusR3[i];

-        int rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
+        int rc = PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
         AssertReleaseRC(rc);
@@ -2298 +2300 @@
     {
         pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
-        int rc = PGMHCChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu));
+        int rc = PGMHCChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu), false /* fForce */);
         Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
         AssertRCReturn(rc, rc);
trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp
r93716 → r93922

@@ -3190 +3190 @@
         PVMCPU pVCpu = pVM->apCpusR3[i];

-        rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
+        rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode, false /* fForce */);
         AssertLogRelRCReturn(rc, rc);
trunk/src/VBox/VMM/include/IEMInternal.h
r93906 → r93922

@@ -942 +942 @@
 # define IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR      RT_BIT_32(0)
 /** Translating a nested-guest linear address failed accessing a
- *  paging-structure entry. */
+ *  paging-structure entry or updating accessed/dirty bits. */
 # define IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE     RT_BIT_32(1)
 /** @} */

 PGM_ALL_CB2_PROTO(FNPGMPHYSHANDLER) iemVmxApicAccessPageHandler;
+# ifndef IN_RING3
+DECLCALLBACK(FNPGMRZPHYSPFHANDLER)  iemVmxApicAccessPagePfHandler;
+# endif
 #endif
trunk/src/VBox/VMM/include/PGMInternal.h
r93905 → r93922

@@ -2745 +2745 @@
 #ifndef IN_RING3
     DECLCALLBACKMEMBER(int,  pfnTrap0eHandler,(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
+    DECLCALLBACKMEMBER(int,  pfnNestedTrap0eHandler,(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested,
+                                                     bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
+                                                     bool *pfLockTaken));
 #endif
 #ifdef VBOX_STRICT