Changeset 80064 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jul 31, 2019 10:31:36 AM
- svn:sync-xref-src-repo-rev: 132503
- Files: 1 edited
Legend:
- unmodified: leading space
- removed (present in r80055 only): leading '-'
- added (present in r80064 only): leading '+'
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
--- trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r80055)
+++ trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r80064)
@@ -27 +27 @@
 #include <VBox/vmm/mm.h>
 #include <VBox/vmm/em.h>
-#ifndef IN_RC
-# include <VBox/vmm/nem.h>
-# include <VBox/vmm/hm.h>
-#endif
-#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
-# include <VBox/vmm/selm.h>
-#endif
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/hm.h>
 #include "CPUMInternal.h"
 #include <VBox/vmm/vm.h>
@@ -1030 +1025 @@
     bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
     pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
-
-#ifdef VBOX_WITH_RAW_MODE_NOT_R0
-    /*
-     * Patch manager saved state legacy pain.
-     */
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
-    if (pLeaf)
-    {
-        if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
-            pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
-        else
-            pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
-    }
-
-    pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
-    if (pLeaf)
-    {
-        if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
-            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
-        else
-            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
-    }
-#endif
-
     return fOld;
 }
@@ -1312 +1282 @@
      * In ring-0 we might end up here when just single stepping.
      */
-#if defined(IN_RC) || defined(IN_RING0)
+#ifdef IN_RING0
     if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
     {
-# ifdef IN_RC
-        ASMSetDR7(X86_DR7_INIT_VAL);
-# endif
         if (pVCpu->cpum.s.Hyper.dr[0])
             ASMSetDR0(0);
@@ -1382 +1349 @@
     if (fNewComponents)
     {
-#if defined(IN_RING0) || defined(IN_RC)
+#ifdef IN_RING0
         if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
         {
@@ -1618 +1585 @@
 }

-#ifdef IN_RC
-
-/**
- * Lazily sync in the FPU/XMM state.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
-{
-    return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
-}
-
-#endif /* !IN_RC */

 /**
@@ -1784 +1737 @@
             uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
         else
-        {
             uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
-#ifdef VBOX_WITH_RAW_MODE_NOT_R0
-# ifdef VBOX_WITH_RAW_RING1
-            if (pVCpu->cpum.s.fRawEntered)
-            {
-                if (uCpl == 1)
-                    uCpl = 0;
-            }
-            Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
-# else
-            if (uCpl == 1)
-                uCpl = 0;
-# endif
-#endif
-        }
     }
     else
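The CPL hunk above drops the raw-mode adjustment: raw mode used to run guest ring-0 code in ring 1, so a CPL of 1 read from SS.RPL had to be folded back to 0. A minimal standalone sketch of that old derivation, assuming only that the low two bits of a selector hold the RPL; the helper name and the fRawModeEntered flag are illustrative, not the VBox code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustration only: CPL from the SS selector RPL, plus the legacy
       raw-mode ring-1 folding that r80064 removes from the CPL query above. */
    static unsigned GetCplFromSsSketch(uint16_t uSsSel, bool fRawModeEntered)
    {
        unsigned uCpl = uSsSel & 3;         /* X86_SEL_RPL: selector bits 0-1 hold the RPL */
        if (fRawModeEntered && uCpl == 1)   /* guest ring 0 executed in ring 1 under raw mode */
            uCpl = 0;
        return uCpl;
    }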
@@ -2037 +1975 @@
 VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)
 {
-#ifndef IN_RC
     /*
      * Return the state of guest-NMI blocking in any of the following cases:
@@ -2059 +1996 @@
      */
     return CPUMIsGuestVmxVirtNmiBlocking(pVCpu, pCtx);
-#else
-    return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
-#endif
 }
@@ -2073 +2007 @@
 VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)
 {
-#ifndef IN_RC
     /*
      * Set the state of guest-NMI blocking in any of the following cases:
@@ -2107 +2040 @@
      */
     return CPUMSetGuestVmxVirtNmiBlocking(pVCpu, pCtx, fBlock);
-#else
-    if (fBlock)
-        VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
-    else
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
-#endif
 }
@@ -2129 +2056 @@
     /** @todo Optimization: Avoid this function call and use a pointer to the
      *        relevant eflags instead (setup during VMRUN instruction emulation). */
-#ifdef IN_RC
-    RT_NOREF2(pVCpu, pCtx);
-    AssertReleaseFailedReturn(false);
-#else
     Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));

@@ -2142 +2065 @@

     return fEFlags.Bits.u1IF;
-#endif
 }
@@ -2158 +2080 @@
 VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
 {
-#ifdef IN_RC
-    RT_NOREF2(pVCpu, pCtx);
-    AssertReleaseFailedReturn(false);
-#else
     RT_NOREF(pVCpu);
     Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
@@ -2173 +2091 @@

     return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
-#endif
 }
@@ -2185 +2102 @@
 VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
 {
-#ifdef IN_RC
-    RT_NOREF(pCtx);
-    AssertReleaseFailedReturn(0);
-#else
     PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
     return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
-#endif
 }
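The SVM hunks above leave CPUMIsGuestSvmVirtIntrEnabled and CPUMGetGuestSvmVirtIntrVector with a single code path for all contexts. A hedged sketch of how a caller might pair them when deciding whether the nested guest can take a virtual interrupt; only the two CPUM signatures come from this file, the wrapper, its name, and its exact semantics are assumptions:

    /* Hypothetical helper: true plus the V_INTR vector when the nested guest
       currently accepts virtual interrupts, false otherwise. */
    static bool cpumSketchQuerySvmVirtIntr(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t *puVector)
    {
        if (!CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx))    /* EFLAGS.IF gate from the hunk above */
            return false;
        *puVector = CPUMGetGuestSvmVirtIntrVector(pCtx);    /* VMCB IntCtrl u8VIntrVector */
        return true;
    }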
@@ -2266 +2178 @@
  * @returns The TSC offset after applying any nested-guest TSC offset.
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   uTicks      The guest TSC.
+ * @param   uTscValue   The guest TSC.
  *
  * @sa CPUMRemoveNestedGuestTscOffset.
  */
-VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
-{
-#ifndef IN_RC
+VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
+{
     PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     if (CPUMIsGuestInVmxNonRootMode(pCtx))
@@ -2279 +2190 @@
         Assert(pVmcs);
         if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
-            return uTicks + pVmcs->u64TscOffset.u;
-        return uTicks;
+            return uTscValue + pVmcs->u64TscOffset.u;
+        return uTscValue;
     }

     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
-        uint64_t u64TscOffset;
-        if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
+        uint64_t offTsc;
+        if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
         {
             PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
             Assert(pVmcb);
-            u64TscOffset = pVmcb->ctrl.u64TSCOffset;
+            offTsc = pVmcb->ctrl.u64TSCOffset;
         }
-        return uTicks + u64TscOffset;
-    }
-#else
-    RT_NOREF(pVCpu);
-#endif
-    return uTicks;
+        return uTscValue + offTsc;
+    }
+    return uTscValue;
 }
@@ -2307 +2215 @@
  * @returns The TSC offset after removing any nested-guest TSC offset.
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   uTicks      The nested-guest TSC.
+ * @param   uTscValue   The nested-guest TSC.
  *
  * @sa CPUMApplyNestedGuestTscOffset.
  */
-VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
-{
-#ifndef IN_RC
+VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
+{
     PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     if (CPUMIsGuestInVmxNonRootMode(pCtx))
@@ -2321 +2228 @@
             PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
             Assert(pVmcs);
-            return uTicks - pVmcs->u64TscOffset.u;
+            return uTscValue - pVmcs->u64TscOffset.u;
         }
-        return uTicks;
+        return uTscValue;
     }

     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
-        uint64_t u64TscOffset;
-        if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
+        uint64_t offTsc;
+        if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
         {
             PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
             Assert(pVmcb);
-            u64TscOffset = pVmcb->ctrl.u64TSCOffset;
+            offTsc = pVmcb->ctrl.u64TSCOffset;
         }
-        return uTicks - u64TscOffset;
-    }
-#else
-    RT_NOREF(pVCpu);
-#endif
-    return uTicks;
+        return uTscValue - offTsc;
+    }
+    return uTscValue;
 }
@@ -2359 +2263 @@
     if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
     {
-#ifndef IN_RC
         switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
         {
@@ -2383 +2286 @@
                 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
         }
-#else
-        AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
-#endif
     }
     return VINF_SUCCESS;
@@ -2538 +2438 @@
 VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVM pVM, uint64_t u64VmcsField)
 {
-#ifndef IN_RC
     uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
     uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
@@ -2792 +2691 @@

     return false;
-#else
-    RT_NOREF2(pVM, u64VmcsField);
-    return false;
-#endif
 }
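Apart from renaming uTicks to uTscValue and u64TscOffset to offTsc, the two TSC helpers above now consist only of the former ring-0/ring-3 path. They remain mirror images, so a caller can rely on the round trip cancelling out while the active VMCS/VMCB offset is unchanged; a small sketch of that property (the wrapper itself is illustrative, not part of the changeset):

    /* Sanity sketch: applying and then removing the nested-guest TSC offset
       must hand back the original guest TSC value. */
    static void cpumSketchTscOffsetRoundTrip(PCVMCPU pVCpu, uint64_t uGuestTsc)
    {
        uint64_t const uNstGstTsc = CPUMApplyNestedGuestTscOffset(pVCpu, uGuestTsc);
        uint64_t const uRoundTrip = CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc);
        Assert(uRoundTrip == uGuestTsc);
    }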
@@ -2809 +2704 @@
 VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
 {
-#ifndef IN_RC
     PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
@@ -2824 +2718 @@

     return false;
-#else
-    RT_NOREF3(pVCpu, u16Port, cbAccess);
-    return false;
-#endif
 }
@@ -2840 +2730 @@
 VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
 {
-#ifndef IN_RC
     /*
     * If the CR3-load exiting control is set and the new CR3 value does not
@@ -2867 +2756 @@
     }
     return false;
-#else
-    RT_NOREF2(pVCpu, uNewCr3);
-    return false;
-#endif
 }
@@ -2886 +2771 @@
 VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
 {
-#ifndef IN_RC
     Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
     Assert(   uExitReason == VMX_EXIT_VMREAD
@@ -2915 +2799 @@
     Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     return ASMBitTest(pbBitmap + (u32VmcsField >> 3), u32VmcsField & 7);
-#else
-    RT_NOREF3(pVCpu, uExitReason, u64VmcsField);
-    return false;
-#endif
 }

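The VMREAD/VMWRITE intercept check above indexes the bitmap by byte (u32VmcsField >> 3) and bit (u32VmcsField & 7) before handing the test to ASMBitTest. The same arithmetic as a standalone plain-C sketch, not the IPRT implementation:

    #include <stdbool.h>
    #include <stdint.h>

    /* One bit per VMCS field encoding; a set bit means a nested-guest
       VMREAD/VMWRITE of that field causes a VM-exit. */
    static bool SketchVmreadVmwriteBitmapTest(const uint8_t *pbBitmap, uint32_t u32VmcsField)
    {
        return (pbBitmap[u32VmcsField >> 3] >> (u32VmcsField & 7)) & 1;
    }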