Changeset 93963 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Feb 28, 2022 8:39:08 AM
- svn:sync-xref-src-repo-rev: 150193
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 5 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r93655 → r93963

     return SVMR0ImportStateOnDemand(pVCpu, fWhat);
 }
+
+
+/**
+ * Gets HM VM-exit auxiliary information.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context CPU structure.
+ * @param   pHmExitAux  Where to store the auxiliary info.
+ * @param   fWhat       What to get, see HMVMX_READ_XXX. This is ignored/unused
+ *                      on AMD-V.
+ *
+ * @remarks Currently this works only when executing a nested-guest using
+ *          hardware-assisted execution as it's where the auxiliary information is
+ *          required outside of HM. In the future we can make this available while
+ *          executing a regular (non-nested) guest if necessary.
+ */
+VMMR0_INT_DECL(int) HMR0GetExitAuxInfo(PVMCPUCC pVCpu, PHMEXITAUX pHmExitAux, uint32_t fWhat)
+{
+    Assert(pHmExitAux);
+    Assert(!(fWhat & ~HMVMX_READ_VALID_MASK));
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
+        return VMXR0GetExitAuxInfo(pVCpu, &pHmExitAux->Vmx, fWhat);
+    return SVMR0GetExitAuxInfo(pVCpu, &pHmExitAux->Svm);
+}
+

 #ifdef VBOX_STRICT
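The new HMR0GetExitAuxInfo() wrapper simply dispatches to the VT-x or AMD-V backend based on hm.s.vmx.fSupported. A minimal sketch of a ring-0 caller follows; the HMEXITAUX union, the HMVMX_READ_XXX flags and the return codes come from this changeset, while the helper name, the logging and the assumption that the HM ring-0 declarations are in scope are illustrative only:

/* Illustrative sketch only, not part of the changeset. Assumes the HM ring-0
   declarations (HMR0GetExitAuxInfo, HMEXITAUX, HMVMX_READ_XXX) are in scope. */
static int exampleLogNestedGuestExit(PVMCPUCC pVCpu, bool fIsVmx)
{
    HMEXITAUX ExitAux;
    RT_ZERO(ExitAux);

    /* The fWhat mask selects which VMCS fields to read on demand; ignored on AMD-V. */
    uint32_t const fWhat = HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN;
    int rc = HMR0GetExitAuxInfo(pVCpu, &ExitAux, fWhat);
    if (RT_SUCCESS(rc))
    {
        if (fIsVmx)
            Log(("VM-exit: reason=%u qual=%#RX64 cbInstr=%u\n",
                 ExitAux.Vmx.uReason, ExitAux.Vmx.u64Qual, ExitAux.Vmx.cbInstr));
        else
            Log(("#VMEXIT: code=%#RX64 info1=%#RX64 info2=%#RX64\n",
                 ExitAux.Svm.u64ExitCode, ExitAux.Svm.u64ExitInfo1, ExitAux.Svm.u64ExitInfo2));
    }
    /* VERR_NOT_AVAILABLE is returned outside the nested-guest run loop; see the
       pSvmTransient/pVmxTransient assignments in the hunks below. */
    return rc;
}

As the @remarks note, the call only succeeds while the nested-guest run loops below have published their transient state.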
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r93931 → r93963

                                  | HMSVM_VMCB_CLEAN_AVIC)
 /** @} */

-/** @name SVM transient.
- *
- * A state structure for holding miscellaneous information across AMD-V
- * VMRUN/\#VMEXIT operation, restored after the transition.
- *
- * @{ */
-typedef struct SVMTRANSIENT
-{
-    /** The host's rflags/eflags. */
-    RTCCUINTREG     fEFlags;
-    /** The \#VMEXIT exit code (the EXITCODE field in the VMCB). */
-    uint64_t        u64ExitCode;
-
-    /** The guest's TPR value used for TPR shadowing. */
-    uint8_t         u8GuestTpr;
-    /** Alignment. */
-    uint8_t         abAlignment0[7];
-
-    /** Pointer to the currently executing VMCB. */
-    PSVMVMCB        pVmcb;
-
-    /** Whether we are currently executing a nested-guest. */
-    bool            fIsNestedGuest;
-    /** Whether the guest debug state was active at the time of \#VMEXIT. */
-    bool            fWasGuestDebugStateActive;
-    /** Whether the hyper debug state was active at the time of \#VMEXIT. */
-    bool            fWasHyperDebugStateActive;
-    /** Whether the TSC offset mode needs to be updated. */
-    bool            fUpdateTscOffsetting;
-    /** Whether the TSC_AUX MSR needs restoring on \#VMEXIT. */
-    bool            fRestoreTscAuxMsr;
-    /** Whether the \#VMEXIT was caused by a page-fault during delivery of a
-     *  contributary exception or a page-fault. */
-    bool            fVectoringDoublePF;
-    /** Whether the \#VMEXIT was caused by a page-fault during delivery of an
-     *  external interrupt or NMI. */
-    bool            fVectoringPF;
-    /** Padding. */
-    bool            afPadding0;
-} SVMTRANSIENT;
-/** Pointer to SVM transient state. */
-typedef SVMTRANSIENT *PSVMTRANSIENT;
-/** Pointer to a const SVM transient state. */
-typedef const SVMTRANSIENT *PCSVMTRANSIENT;
-
-AssertCompileSizeAlignment(SVMTRANSIENT, sizeof(uint64_t));
-AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
-AssertCompileMemberAlignment(SVMTRANSIENT, pVmcb, sizeof(uint64_t));
-/** @} */
-
 /**
…
 /**
+ * Gets SVM \#VMEXIT auxiliary information.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu        The cross context virtual CPU structure.
+ * @param   pSvmExitAux  Where to store the auxiliary info.
+ */
+VMMR0DECL(int) SVMR0GetExitAuxInfo(PVMCPUCC pVCpu, PSVMEXITAUX pSvmExitAux)
+{
+    PCSVMTRANSIENT pSvmTransient = pVCpu->hmr0.s.svm.pSvmTransient;
+    if (RT_LIKELY(pSvmTransient))
+    {
+        PCSVMVMCB pVmcb = pSvmTransient->pVmcb;
+        if (RT_LIKELY(pVmcb))
+        {
+            pSvmExitAux->u64ExitCode  = pVmcb->ctrl.u64ExitCode;
+            pSvmExitAux->u64ExitInfo1 = pVmcb->ctrl.u64ExitInfo1;
+            pSvmExitAux->u64ExitInfo2 = pVmcb->ctrl.u64ExitInfo2;
+            pSvmExitAux->ExitIntInfo  = pVmcb->ctrl.ExitIntInfo;
+            return VINF_SUCCESS;
+        }
+        return VERR_SVM_IPE_5;
+    }
+    return VERR_NOT_AVAILABLE;
+}
+
+
+/**
  * Does the necessary state syncing before returning to ring-3 for any reason
  * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
…
     SvmTransient.fIsNestedGuest = true;

+    /* Setup pointer so PGM/IEM can query #VMEXIT auxiliary info. on demand in ring-0. */
+    pVCpu->hmr0.s.svm.pSvmTransient = &SvmTransient;
+
     VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_4;
     for (;;)
…
         /** @todo NSTSVM: handle single-stepping. */
     }
+
+    /* Ensure #VMEXIT auxiliary info. is no longer available. */
+    pVCpu->hmr0.s.svm.pSvmTransient = NULL;

     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
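SVMR0GetExitAuxInfo() snapshots the exit fields of the VMCB recorded in the published SVMTRANSIENT, so it is only valid between the two pSvmTransient assignments above. As a hedged illustration, a hypothetical ring-0 consumer could use it to extract the guest-physical address of a nested page fault; SVMEXITAUX and the API come from this changeset, while SVM_EXIT_NPF is the regular AMD-V nested-page-fault exit code and the helper itself is an assumption:

/* Illustrative sketch only, not part of the changeset. */
static int exampleGetNestedPageFaultAddr(PVMCPUCC pVCpu, uint64_t *pGCPhysFault)
{
    SVMEXITAUX ExitAux;
    int rc = SVMR0GetExitAuxInfo(pVCpu, &ExitAux);
    if (RT_SUCCESS(rc))
    {
        if (ExitAux.u64ExitCode == SVM_EXIT_NPF)
        {
            /* For #NPF, EXITINFO2 holds the faulting guest-physical address and
               EXITINFO1 the fault error code. */
            *pGCPhysFault = ExitAux.u64ExitInfo2;
            return VINF_SUCCESS;
        }
        rc = VERR_NOT_FOUND;
    }
    return rc; /* VERR_NOT_AVAILABLE when called outside the nested-guest run loop. */
}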
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
r93115 → r93963

 VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat);
 VMMR0DECL(int) SVMR0InvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCVirt);
+VMMR0DECL(int) SVMR0GetExitAuxInfo(PVMCPUCC pVCpu, PSVMEXITAUX pSvmExitAux);

 /**
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r93932 → r93963

 /**
+ * Gets VMX VM-exit auxiliary information.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu        The cross context virtual CPU structure.
+ * @param   pVmxExitAux  Where to store the VM-exit auxiliary info.
+ * @param   fWhat        What to fetch, HMVMX_READ_XXX.
+ */
+VMMR0DECL(int) VMXR0GetExitAuxInfo(PVMCPUCC pVCpu, PVMXEXITAUX pVmxExitAux, uint32_t fWhat)
+{
+    PVMXTRANSIENT pVmxTransient = pVCpu->hmr0.s.vmx.pVmxTransient;
+    if (RT_LIKELY(pVmxTransient))
+    {
+        AssertCompile(sizeof(fWhat) == sizeof(pVmxTransient->fVmcsFieldsRead));
+        fWhat &= ~pVmxTransient->fVmcsFieldsRead;
+
+        /* The exit reason is always available. */
+        pVmxExitAux->uReason = pVmxTransient->uExitReason;
+
+        if (fWhat & HMVMX_READ_EXIT_QUALIFICATION)
+        {
+            vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
+            fWhat &= ~HMVMX_READ_EXIT_QUALIFICATION;
+            pVmxExitAux->u64Qual = pVmxTransient->uExitQual;
+        }
+
+        if (fWhat & HMVMX_READ_IDT_VECTORING_INFO)
+        {
+            vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
+            fWhat &= ~HMVMX_READ_IDT_VECTORING_INFO;
+            pVmxExitAux->uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
+        }
+
+        if (fWhat & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
+        {
+            vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
+            fWhat &= ~HMVMX_READ_IDT_VECTORING_ERROR_CODE;
+            pVmxExitAux->uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
+        }
+
+        if (fWhat & HMVMX_READ_EXIT_INSTR_LEN)
+        {
+            vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
+            fWhat &= ~HMVMX_READ_EXIT_INSTR_LEN;
+            pVmxExitAux->cbInstr = pVmxTransient->cbExitInstr;
+        }
+
+        if (fWhat & HMVMX_READ_EXIT_INTERRUPTION_INFO)
+        {
+            vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
+            fWhat &= ~HMVMX_READ_EXIT_INTERRUPTION_INFO;
+            pVmxExitAux->uExitIntInfo = pVmxTransient->uExitIntInfo;
+        }
+
+        if (fWhat & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
+        {
+            vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
+            fWhat &= ~HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
+            pVmxExitAux->uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
+        }
+
+        if (fWhat & HMVMX_READ_EXIT_INSTR_INFO)
+        {
+            vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
+            fWhat &= ~HMVMX_READ_EXIT_INSTR_INFO;
+            pVmxExitAux->InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
+        }
+
+        if (fWhat & HMVMX_READ_GUEST_LINEAR_ADDR)
+        {
+            vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
+            fWhat &= ~HMVMX_READ_GUEST_LINEAR_ADDR;
+            pVmxExitAux->u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
+        }
+
+        if (fWhat & HMVMX_READ_GUEST_PHYSICAL_ADDR)
+        {
+            vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
+            fWhat &= ~HMVMX_READ_GUEST_PHYSICAL_ADDR;
+            pVmxExitAux->u64GuestPhysAddr = pVmxTransient->uGuestPhysicalAddr;
+        }
+
+        if (fWhat & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
+        {
+            vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
+            fWhat &= ~HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
+            pVmxExitAux->u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
+        }
+
+        AssertMsg(!fWhat, ("fWhat=%#RX32 fVmcsFieldsRead=%#RX32\n", fWhat, pVmxTransient->fVmcsFieldsRead));
+        return VINF_SUCCESS;
+    }
+    return VERR_NOT_AVAILABLE;
+}
+
+
+/**
  * Does the necessary state syncing before returning to ring-3 for any reason
  * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
…
     Assert(VmxTransient.pVmcsInfo == &pVCpu->hmr0.s.vmx.VmcsInfoNstGst);

+    /* Setup pointer so PGM/IEM can query VM-exit auxiliary info. on demand in ring-0. */
+    pVCpu->hmr0.s.vmx.pVmxTransient = &VmxTransient;
+
     VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
     for (;;)
…
             STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
             hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
-            return rcRun;
+            rcStrict = rcRun;
+            break;
         }
…
             break;
     }
+
+    /* Ensure VM-exit auxiliary info. is no longer available. */
+    pVCpu->hmr0.s.vmx.pVmxTransient = NULL;

     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
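The VT-x variant reads the requested VMCS fields on demand and skips anything already cached in fVmcsFieldsRead, with the exit reason always available. As a rough sketch under the assumption that the standard VMX_EXIT_EPT_VIOLATION exit-reason constant is in scope (the helper itself is hypothetical), a ring-0 consumer could fetch the qualification and guest-physical address of an EPT violation like this:

/* Illustrative sketch only, not part of the changeset. */
static int exampleGetEptViolationInfo(PVMCPUCC pVCpu, uint64_t *pu64Qual, uint64_t *pGCPhys)
{
    VMXEXITAUX ExitAux;
    RT_ZERO(ExitAux);
    int rc = VMXR0GetExitAuxInfo(pVCpu, &ExitAux,
                                 HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_GUEST_PHYSICAL_ADDR);
    if (RT_SUCCESS(rc))
    {
        if (ExitAux.uReason == VMX_EXIT_EPT_VIOLATION)
        {
            *pu64Qual = ExitAux.u64Qual;
            *pGCPhys  = ExitAux.u64GuestPhysAddr;
            return VINF_SUCCESS;
        }
        rc = VERR_NOT_FOUND;
    }
    return rc; /* VERR_NOT_AVAILABLE when no VM-exit info has been published. */
}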
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r93115 → r93963

 VMMR0DECL(int) VMXR0InvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCVirt);
 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat);
+VMMR0DECL(int) VMXR0GetExitAuxInfo(PVMCPUCC pVCpu, PVMXEXITAUX pVmxExitAux, uint32_t fWhat);
 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPUCC pVCpu);
 #endif /* IN_RING0 */