VirtualBox

Changeset 80064 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Jul 31, 2019 10:31:36 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 132503
Message: VMM: Kicking out raw-mode and 32-bit hosts - CPUM. bugref:9517 bugref:9511

File: 1 edited

Legend: unchanged context lines are prefixed with a space, lines removed from r80055 with '-', lines added in r80064 with '+'; each '@@ -start,count +start,count @@' header gives the starting line and line count in the old and new revision.
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

--- trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (revision 80055)
+++ trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (revision 80064)
@@ -27,11 +27,6 @@
 #include <VBox/vmm/mm.h>
 #include <VBox/vmm/em.h>
-#ifndef IN_RC
-# include <VBox/vmm/nem.h>
-# include <VBox/vmm/hm.h>
-#endif
-#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
-# include <VBox/vmm/selm.h>
-#endif
+#include <VBox/vmm/nem.h>
+#include <VBox/vmm/hm.h>
 #include "CPUMInternal.h"
 #include <VBox/vmm/vm.h>
     
@@ -1030,29 +1025,4 @@
     bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
     pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
-
-#ifdef VBOX_WITH_RAW_MODE_NOT_R0
-    /*
-     * Patch manager saved state legacy pain.
-     */
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
-    if (pLeaf)
-    {
-        if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
-            pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
-        else
-            pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
-    }
-
-    pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
-    if (pLeaf)
-    {
-        if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
-            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
-        else
-            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
-    }
-#endif
-
     return fOld;
 }
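
Note: the deleted VBOX_WITH_RAW_MODE_NOT_R0 block above only kept the legacy patch-manager CPUID copies (aGuestCpuIdPatmStd[1] / aGuestCpuIdPatmExt[1]) in sync when the APIC feature bit is hidden from or exposed to the guest. The masking itself is simple; the following standalone sketch (not VirtualBox code; the helper name and sample EDX value are invented) shows the same EDX manipulation using the architectural APIC bit (bit 9) that X86_CPUID_FEATURE_EDX_APIC and X86_CPUID_AMD_FEATURE_EDX_APIC name:

    #include <stdint.h>
    #include <stdio.h>

    /* Architectural position of the APIC flag in CPUID.01H:EDX (mirrored in
       CPUID.80000001H:EDX on AMD). */
    #define CPUID_EDX_APIC  UINT32_C(0x00000200)   /* bit 9 */

    /* Sketch of the masking the removed block applied to the PATM CPUID copies:
       clear the APIC bit unless it is visible or the leaf always contains it. */
    static uint32_t maskApicBit(uint32_t uEdx, int fVisible, int fLeafContainsApic)
    {
        if (fVisible || fLeafContainsApic)
            return uEdx;
        return uEdx & ~CPUID_EDX_APIC;
    }

    int main(void)
    {
        /* Sample EDX value, invented for illustration. */
        printf("%#x\n", (unsigned)maskApicBit(0x178bfbffu, 0 /*fVisible*/, 0 /*fLeafContainsApic*/));
        return 0;
    }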
     
@@ -1312,10 +1282,7 @@
          * In ring-0 we might end up here when just single stepping.
          */
-#if defined(IN_RC) || defined(IN_RING0)
+#ifdef IN_RING0
         if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
         {
-# ifdef IN_RC
-            ASMSetDR7(X86_DR7_INIT_VAL);
-# endif
             if (pVCpu->cpum.s.Hyper.dr[0])
                 ASMSetDR0(0);
     
@@ -1382,5 +1349,5 @@
         if (fNewComponents)
         {
-#if defined(IN_RING0) || defined(IN_RC)
+#ifdef IN_RING0
             if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
             {
     
@@ -1618,18 +1585,4 @@
 }

-#ifdef IN_RC
-
-/**
- * Lazily sync in the FPU/XMM state.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
-{
-    return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
-}
-
-#endif /* !IN_RC */

 /**
     
@@ -1784,20 +1737,5 @@
                 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
             else
-            {
                 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
-#ifdef VBOX_WITH_RAW_MODE_NOT_R0
-# ifdef VBOX_WITH_RAW_RING1
-                if (pVCpu->cpum.s.fRawEntered)
-                {
-                    if (uCpl == 1)
-                        uCpl = 0;
-                }
-                Assert(uCpl != 2);  /* ring 2 support not allowed anymore. */
-# else
-                if (uCpl == 1)
-                    uCpl = 0;
-# endif
-#endif
-            }
         }
         else
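
Note: the removed raw-mode code folded ring 1 back to ring 0 (raw-mode executed the guest's ring-0 code in ring 1), so CPUMGetGuestCPL now simply takes the CPL from the hidden SS attributes' DPL or, in the fallback kept above, from the RPL bits of the SS selector. A standalone sketch of that fallback (not VirtualBox code; the helper and sample selectors are invented), where X86_SEL_RPL is the low two bits of a selector:

    #include <stdint.h>
    #include <stdio.h>

    #define SEL_RPL_MASK  UINT16_C(0x0003)   /* low two bits of a selector = RPL */

    /* Sketch: when the hidden SS attributes are not usable, the CPL is simply
       the RPL of the SS selector.  Raw-mode used to fold RPL 1 back to 0 here. */
    static unsigned cplFromSs(uint16_t uSsSel)
    {
        return uSsSel & SEL_RPL_MASK;
    }

    int main(void)
    {
        printf("SS=0x1b -> CPL %u\n", cplFromSs(0x1b));   /* typical user-mode SS: CPL 3 */
        printf("SS=0x10 -> CPL %u\n", cplFromSs(0x10));   /* typical kernel SS:  CPL 0 */
        return 0;
    }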
     
@@ -2037,5 +1975,4 @@
 VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)
 {
-#ifndef IN_RC
     /*
      * Return the state of guest-NMI blocking in any of the following cases:
     
@@ -2059,7 +1996,4 @@
      */
     return CPUMIsGuestVmxVirtNmiBlocking(pVCpu, pCtx);
-#else
-    return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
-#endif
 }

     
@@ -2073,5 +2007,4 @@
 VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)
 {
-#ifndef IN_RC
     /*
      * Set the state of guest-NMI blocking in any of the following cases:
     
@@ -2107,10 +2040,4 @@
      */
     return CPUMSetGuestVmxVirtNmiBlocking(pVCpu, pCtx, fBlock);
-#else
-    if (fBlock)
-        VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
-    else
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
-#endif
 }

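
Note: with the raw-mode #else branches gone, CPUMIsGuestNmiBlocking and CPUMSetGuestNmiBlocking no longer fall back to tracking the state purely through the VMCPU_FF_BLOCK_NMIS force flag; the virtual-NMI aware VMX helpers shown above handle the nested case. A hedged usage sketch follows (it assumes the VBox/VMM headers; the surrounding function and its logic are invented for illustration, not taken from the tree):

    #include <VBox/vmm/cpum.h>
    #include <VBox/vmm/vm.h>

    /* Illustration only: a caller consulting and updating guest NMI blocking. */
    static bool exampleTryInjectNmi(PVMCPU pVCpu)
    {
        /* Nothing can be delivered while the guest is in an NMI-blocked
           window (e.g. between NMI delivery and the matching IRET). */
        if (CPUMIsGuestNmiBlocking(pVCpu))
            return false;

        /* ... deliver the NMI through the normal event-injection path ... */

        /* Delivery opens a new NMI-blocked window until the next IRET. */
        CPUMSetGuestNmiBlocking(pVCpu, true /*fBlock*/);
        return true;
    }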
     
@@ -2129,8 +2056,4 @@
     /** @todo Optimization: Avoid this function call and use a pointer to the
      *        relevant eflags instead (setup during VMRUN instruction emulation). */
-#ifdef IN_RC
-    RT_NOREF2(pVCpu, pCtx);
-    AssertReleaseFailedReturn(false);
-#else
     Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));

     
@@ -2142,5 +2065,4 @@

     return fEFlags.Bits.u1IF;
-#endif
 }

     
@@ -2158,8 +2080,4 @@
 VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
 {
-#ifdef IN_RC
-    RT_NOREF2(pVCpu, pCtx);
-    AssertReleaseFailedReturn(false);
-#else
     RT_NOREF(pVCpu);
     Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
     
@@ -2173,5 +2091,4 @@

     return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
-#endif
 }

     
@@ -2185,11 +2102,6 @@
 VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx)
 {
-#ifdef IN_RC
-    RT_NOREF(pCtx);
-    AssertReleaseFailedReturn(0);
-#else
     PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
     return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
-#endif
 }

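
Note: the SVM interrupt helpers above now have a single ring-3/ring-0 implementation. A hedged sketch of how they fit together when picking up a nested guest's pending virtual interrupt (it assumes the VBox/VMM headers; the wrapper function is invented for illustration):

    #include <VBox/vmm/cpum.h>
    #include <VBox/vmm/cpumctx.h>

    /* Illustration only: read the pending V_INTR vector from the nested
       guest's VMCB, but only while virtual interrupts are deliverable. */
    static bool exampleGetSvmVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t *puVector)
    {
        if (!CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx))
            return false;
        *puVector = CPUMGetGuestSvmVirtIntrVector(pCtx);   /* VMCB IntCtrl.u8VIntrVector */
        return true;
    }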
     
@@ -2266,11 +2178,10 @@
  * @returns The TSC offset after applying any nested-guest TSC offset.
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   uTicks      The guest TSC.
+ * @param   uTscValue   The guest TSC.
  *
  * @sa      CPUMRemoveNestedGuestTscOffset.
  */
-VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
-{
-#ifndef IN_RC
+VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
+{
     PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     if (CPUMIsGuestInVmxNonRootMode(pCtx))
     
@@ -2279,5 +2190,5 @@
         Assert(pVmcs);
         if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
-            return uTicks + pVmcs->u64TscOffset.u;
-        return uTicks;
+            return uTscValue + pVmcs->u64TscOffset.u;
+        return uTscValue;
     }
    22842195
@@ -2285,17 +2196,14 @@
     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
-        uint64_t u64TscOffset;
-        if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
+        uint64_t offTsc;
+        if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
         {
             PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
             Assert(pVmcb);
-            u64TscOffset = pVmcb->ctrl.u64TSCOffset;
+            offTsc = pVmcb->ctrl.u64TSCOffset;
         }
-        return uTicks + u64TscOffset;
-    }
-#else
-    RT_NOREF(pVCpu);
-#endif
-    return uTicks;
+        return uTscValue + offTsc;
+    }
+    return uTscValue;
 }

     
@@ -2307,11 +2215,10 @@
  * @returns The TSC offset after removing any nested-guest TSC offset.
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   uTicks      The nested-guest TSC.
+ * @param   uTscValue   The nested-guest TSC.
  *
  * @sa      CPUMApplyNestedGuestTscOffset.
  */
-VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks)
-{
-#ifndef IN_RC
+VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue)
+{
     PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     if (CPUMIsGuestInVmxNonRootMode(pCtx))
     
@@ -2321,23 +2228,20 @@
             PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
             Assert(pVmcs);
-            return uTicks - pVmcs->u64TscOffset.u;
+            return uTscValue - pVmcs->u64TscOffset.u;
         }
-        return uTicks;
+        return uTscValue;
     }

     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
-        uint64_t u64TscOffset;
-        if (!HMGetGuestSvmTscOffset(pVCpu, &u64TscOffset))
+        uint64_t offTsc;
+        if (!HMGetGuestSvmTscOffset(pVCpu, &offTsc))
        {
             PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
             Assert(pVmcb);
-            u64TscOffset = pVmcb->ctrl.u64TSCOffset;
+            offTsc = pVmcb->ctrl.u64TSCOffset;
         }
-        return uTicks - u64TscOffset;
-    }
-#else
-    RT_NOREF(pVCpu);
-#endif
-    return uTicks;
+        return uTscValue - offTsc;
+    }
+    return uTscValue;
 }
     
@@ -2359,5 +2263,4 @@
     if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
     {
-#ifndef IN_RC
         switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
         {
     
@@ -2383,7 +2286,4 @@
                 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
         }
-#else
-        AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
-#endif
     }
     return VINF_SUCCESS;
     
@@ -2538,5 +2438,4 @@
 VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVM pVM, uint64_t u64VmcsField)
 {
-#ifndef IN_RC
     uint32_t const uFieldEncHi = RT_HI_U32(u64VmcsField);
     uint32_t const uFieldEncLo = RT_LO_U32(u64VmcsField);
     
@@ -2792,8 +2691,4 @@

     return false;
-#else
-    RT_NOREF2(pVM, u64VmcsField);
-    return false;
-#endif
 }

     
@@ -2809,5 +2704,4 @@
 VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
 {
-#ifndef IN_RC
     PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     if (CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_UNCOND_IO_EXIT))
     
@@ -2824,8 +2718,4 @@

     return false;
-#else
-    RT_NOREF3(pVCpu, u16Port, cbAccess);
-    return false;
-#endif
 }

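
Note: CPUMIsGuestVmxIoInterceptSet likewise loses its raw-mode stub. It reports whether an I/O port access by a guest in VMX non-root mode must cause a VM-exit to the nested hypervisor, either because VMX_PROC_CTLS_UNCOND_IO_EXIT is set (the check visible above) or, presumably, via the VMCS I/O bitmaps handled in the elided part of the function. A hedged usage sketch (it assumes the VBox/VMM headers; the wrapper function is invented for illustration):

    #include <VBox/vmm/cpum.h>

    /* Illustration only: decide whether a nested guest's port access has to be
       reflected to the nested hypervisor or can be emulated directly. */
    static bool exampleHandlePortAccess(PVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
    {
        if (CPUMIsGuestVmxIoInterceptSet(pVCpu, u16Port, cbAccess))
        {
            /* ... construct and deliver an I/O instruction VM-exit ... */
            return true;
        }
        /* ... otherwise emulate the access on behalf of the nested guest ... */
        return false;
    }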
     
@@ -2840,5 +2730,4 @@
 VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3)
 {
-#ifndef IN_RC
     /*
      * If the CR3-load exiting control is set and the new CR3 value does not
     
@@ -2867,8 +2756,4 @@
     }
     return false;
-#else
-    RT_NOREF2(pVCpu, uNewCr3);
-    return false;
-#endif
 }

     
@@ -2886,5 +2771,4 @@
 VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64VmcsField)
 {
-#ifndef IN_RC
     Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest));
     Assert(   uExitReason == VMX_EXIT_VMREAD
     
@@ -2915,8 +2799,4 @@
     Assert(u32VmcsField >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     return ASMBitTest(pbBitmap + (u32VmcsField >> 3), u32VmcsField & 7);
-#else
-    RT_NOREF3(pVCpu, uExitReason, u64VmcsField);
-    return false;
-#endif
 }

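
Note: the bitmap test kept above is the standard VMX VMREAD-/VMWRITE-bitmap lookup: one bit per VMCS field encoding, with byte index (u32VmcsField >> 3) and bit position (u32VmcsField & 7), each bitmap being one 4 KB page. A standalone sketch of the same indexing (not VirtualBox code; the helper and the sample field encoding are invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Same math as the ASMBitTest() call above: test bit (field & 7) in byte
       (field >> 3) of the VMREAD or VMWRITE bitmap. */
    static int isFieldIntercepted(uint8_t const *pbBitmap, uint32_t u32VmcsField)
    {
        return (pbBitmap[u32VmcsField >> 3] >> (u32VmcsField & 7)) & 1;
    }

    int main(void)
    {
        uint8_t abBitmap[4096];                       /* one 4 KB bitmap page */
        memset(abBitmap, 0, sizeof(abBitmap));
        abBitmap[0x440eu >> 3] |= (uint8_t)(1u << (0x440eu & 7));   /* mark a sample encoding */
        printf("0x440e intercepted: %d\n", isFieldIntercepted(abBitmap, 0x440e));
        printf("0x0000 intercepted: %d\n", isFieldIntercepted(abBitmap, 0x0000));
        return 0;
    }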
