VirtualBox

Changeset 73606 in vbox for trunk/src/VBox/VMM/VMMR3


Ignore:
Timestamp:
Aug 10, 2018 7:38:56 AM (6 years ago)
Author:
vboxsync
Message:

VMM: Nested VMX: bugref:9180 Various bits:

  • IEM: Started VMXON, VMXOFF implementation, use IEM_OPCODE_GET_NEXT_RM.
  • IEM: Fixed INVPCID C impl, removed unused IEMExecDecodedInvpcid.
  • IEM: Updated iemCImpl_load_CrX to check for CR0/CR4 fixed bits in VMX.
  • IEM: Update offModRm to reset/re-initialize where needed.
  • CPUM: Added VMX root, non-root mode and other bits and updated a few places where they're used.
  • HM: Started adding fine-grained VMX instruction failure diagnostics.
  • HM: Made VM instruction error an enum.
  • HM: Added HMVMXAll.cpp for all context VMX code.
  • Ensure building with VBOX_WITH_NESTED_HWVIRT_[SVM|VMX] does the right thing based on host CPU.
  • CPUM: Added dumping of nested-VMX CPUMCTX state.
  • HMVMXR0: Added memory operand decoding.
  • HMVMXR0: VMX instr. privilege checks (CR0/CR4 read shadows are not consulted, so we need to do them)
  • HM: Added some more bit-field representations.
  • Recompiler: Refuse to run when in nested-VMX guest code.
Location:
trunk/src/VBox/VMM/VMMR3
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r73389 r73606  
    27752775    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
    27762776    static const char *const s_aHwvirtModes[] = { "No/inactive", "SVM", "VMX", "Common" };
    2777     uint8_t const idxHwvirtState = CPUMIsGuestInSvmNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_SVM
    2778                                  : CPUMIsGuestInVmxNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE;
     2777    bool const fSvm = pVM->cpum.ro.GuestFeatures.fSvm;
     2778    bool const fVmx = pVM->cpum.ro.GuestFeatures.fVmx;
     2779    uint8_t const idxHwvirtState = fSvm ? CPUMHWVIRTDUMP_SVM : (fVmx ? CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE);
    27792780    AssertCompile(CPUMHWVIRTDUMP_LAST <= RT_ELEMENTS(s_aHwvirtModes));
    27802781    Assert(idxHwvirtState < RT_ELEMENTS(s_aHwvirtModes));
     
    27882789
    27892790    if (fDumpState & CPUMHWVIRTDUMP_COMMON)
    2790     {
    2791         pHlp->pfnPrintf(pHlp, "fGif                           = %RTbool\n", pCtx->hwvirt.fGif);
    2792         pHlp->pfnPrintf(pHlp, "fLocalForcedActions            = %#RX32\n",  pCtx->hwvirt.fLocalForcedActions);
    2793     }
     2791        pHlp->pfnPrintf(pHlp, "fLocalForcedActions          = %#RX32\n",  pCtx->hwvirt.fLocalForcedActions);
     2792
    27942793    pHlp->pfnPrintf(pHlp, "%s hwvirt state%s\n", pcszHwvirtMode, (fDumpState & (CPUMHWVIRTDUMP_SVM | CPUMHWVIRTDUMP_VMX)) ?
    27952794                                                                 ":" : "");
    27962795    if (fDumpState & CPUMHWVIRTDUMP_SVM)
    27972796    {
     2797        pHlp->pfnPrintf(pHlp, "  fGif                       = %RTbool\n", pCtx->hwvirt.fGif);
     2798
    27982799        char szEFlags[80];
    27992800        cpumR3InfoFormatFlags(&szEFlags[0], pCtx->hwvirt.svm.HostState.rflags.u);
    2800 
    28012801        pHlp->pfnPrintf(pHlp, "  uMsrHSavePa                = %#RX64\n",    pCtx->hwvirt.svm.uMsrHSavePa);
    28022802        pHlp->pfnPrintf(pHlp, "  GCPhysVmcb                 = %#RGp\n",     pCtx->hwvirt.svm.GCPhysVmcb);
     
    28392839    }
    28402840
    2841     /** @todo Intel.  */
    2842 #if 0
    28432841    if (fDumpState & CPUMHWVIRTDUMP_VMX)
    28442842    {
     2843        pHlp->pfnPrintf(pHlp, "  fInVmxRootMode             = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxRootMode);
     2844        pHlp->pfnPrintf(pHlp, "  fInVmxNonRootMode          = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxNonRootMode);
     2845        pHlp->pfnPrintf(pHlp, "  GCPhysVmxon                = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmxon);
     2846        pHlp->pfnPrintf(pHlp, "  GCPhysVmcs                 = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmcs);
     2847        pHlp->pfnPrintf(pHlp, "  enmInstrDiag               = %u (%s)\n",   pCtx->hwvirt.vmx.enmInstrDiag,
     2848                        HMVmxGetInstrDiagDesc(pCtx->hwvirt.vmx.enmInstrDiag));
     2849        /** @todo NSTVMX: Dump remaining/new fields. */
    28452850    }
    2846 #endif
    28472851
    28482852#undef CPUMHWVIRTDUMP_NONE
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r73389 r73606  
    39373937    AssertLogRelRCReturn(rc, rc);
    39383938
    3939 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3940     /** @cfgm{/CPUM/NestedHWVirt, bool, false}
    3941      * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
    3942      * The default is false, and when enabled requires nested paging and AMD-V or
    3943      * unrestricted guest mode.
    3944      */
    3945     rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
    3946     AssertLogRelRCReturn(rc, rc);
    3947     if (   pConfig->fNestedHWVirt
    3948         && !fNestedPagingAndFullGuestExec)
    3949         return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
    3950                           "Cannot enable nested VT-x/AMD-V without nested-paging and unresricted guest execution!\n");
    3951 
    3952     /** @todo Think about enabling this later with NEM/KVM. */
    3953     if (   pConfig->fNestedHWVirt
    3954         && VM_IS_NEM_ENABLED(pVM))
    3955     {
    3956         LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used!\n"));
    3957         pConfig->fNestedHWVirt = false;
    3958     }
     3939    bool fQueryNestedHwvirt = false;
     3940#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     3941    fQueryNestedHwvirt |= RT_BOOL(pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD);
    39593942#endif
     3943#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     3944    fQueryNestedHwvirt |= RT_BOOL(   pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
     3945                                  || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA);
     3946#endif
     3947    if (fQueryNestedHwvirt)
     3948    {
     3949        /** @cfgm{/CPUM/NestedHWVirt, bool, false}
     3950         * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
     3951         * The default is false, and when enabled requires nested paging and AMD-V or
     3952         * unrestricted guest mode.
     3953         */
     3954        rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
     3955        AssertLogRelRCReturn(rc, rc);
     3956        if (   pConfig->fNestedHWVirt
     3957            && !fNestedPagingAndFullGuestExec)
     3958            return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
     3959                              "Cannot enable nested VT-x/AMD-V without nested-paging and unresricted guest execution!\n");
     3960
     3961        /** @todo Think about enabling this later with NEM/KVM. */
     3962        if (   pConfig->fNestedHWVirt
     3963            && VM_IS_NEM_ENABLED(pVM))
     3964        {
     3965            LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used!\n"));
     3966            pConfig->fNestedHWVirt = false;
     3967        }
     3968    }
    39603969
    39613970    /*
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r73097 r73606  
    18151815    }
    18161816
    1817     if (CPUMIsGuestInVmxNestedHwVirtMode(&pVCpu->cpum.GstCtx))
     1817    if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    18181818    { /** @todo Nested VMX. */ }
    18191819
     
    21472147                Assert(!HMR3IsEventPending(pVCpu));
    21482148#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    2149                 if (CPUMIsGuestInNestedHwVirtMode(&pVCpu->cpum.GstCtx))
     2149                if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
    21502150                {
    21512151                    bool fResched, fInject;
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r73389 r73606  
    4242#include <VBox/vmm/stam.h>
    4343#include <VBox/vmm/mm.h>
     44#include <VBox/vmm/em.h>
    4445#include <VBox/vmm/pdmapi.h>
    4546#include <VBox/vmm/pgm.h>
     
    7778#define EXIT_REASON(def, val, str) #def " - " #val " - " str
    7879#define EXIT_REASON_NIL() NULL
    79 /** Exit reason descriptions for VT-x, used to describe statistics. */
    80 static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
     80/** Exit reason descriptions for VT-x, used to describe statistics and exit
     81 *  history. */
     82static const char * const g_apszVmxExitReasons[MAX_EXITREASON_STAT] =
    8183{
    8284    EXIT_REASON(VMX_EXIT_XCPT_OR_NMI            ,   0, "Exception or non-maskable interrupt (NMI)."),
     
    149151#define MAX_EXITREASON_VTX                         64
    150152
    151 /** A partial list of Exit reason descriptions for AMD-V, used to describe
    152  *  statistics.
     153/** A partial list of \#EXIT reason descriptions for AMD-V, used to describe
     154 *  statistics and exit history.
    153155 *
    154156 *  @note AMD-V have annoyingly large gaps (e.g. \#NPF VMEXIT comes at 1024),
    155157 *        this array doesn't contain the entire set of exit reasons, we
    156158 *        handle them via hmSvmGetSpecialExitReasonDesc(). */
    157 static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
     159static const char * const g_apszSvmExitReasons[MAX_EXITREASON_STAT] =
    158160{
    159161    EXIT_REASON(SVM_EXIT_READ_CR0     ,    0, "Read CR0."),
     
    310312/**
    311313 * Gets the SVM exit reason if it's one of the reasons not present in the @c
    312  * g_apszAmdVExitReasons array.
     314 * g_apszSvmExitReasons array.
    313315 *
    314316 * @returns The exit reason or NULL if unknown.
     
    10611063#undef HM_REG_COUNTER
    10621064
    1063         const char *const *papszDesc = ASMIsIntelCpu() || ASMIsViaCentaurCpu() ? &g_apszVTxExitReasons[0]
    1064                                                                                : &g_apszAmdVExitReasons[0];
     1065        const char *const *papszDesc = ASMIsIntelCpu() || ASMIsViaCentaurCpu() ? &g_apszVmxExitReasons[0]
     1066                                                                               : &g_apszSvmExitReasons[0];
    10651067
    10661068        /*
     
    19381940    uint32_t u32Model;
    19391941    uint32_t u32Stepping;
    1940     if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
     1942    if (HMSvmIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
    19411943        LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
    19421944    LogRel(("HM: Max resume loops                  = %u\n",     pVM->hm.s.cMaxResumeLoops));
     
    29482950
    29492951#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
    2950     if (CPUMIsGuestInNestedHwVirtMode(pCtx))
     2952    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
     2953        || CPUMIsGuestVmxEnabled(pCtx))
    29512954    {
    29522955        Log(("HMR3CanExecuteGuest: In nested-guest mode - returning false"));
     
    31513154        &&  CPUMIsGuestInRealModeEx(pCtx)
    31523155        && !PDMVmmDevHeapIsEnabled(pVM))
    3153     {
    31543156        return true;
    3155     }
    31563157
    31573158    return false;
     
    34293430                LogRel(("HM: CPU[%u] Exit reason          %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason));
    34303431
    3431                 if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMLAUCH_NON_CLEAR_VMCS
    3432                     || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMRESUME_NON_LAUNCHED_VMCS)
     3432                if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
     3433                    || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
    34333434                {
    34343435                    LogRel(("HM: CPU[%u] Entered Host Cpu     %u\n",  i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
    34353436                    LogRel(("HM: CPU[%u] Current Host Cpu     %u\n",  i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
    34363437                }
    3437                 else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
     3438                else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTL)
    34383439                {
    34393440                    LogRel(("HM: CPU[%u] PinCtls          %#RX32\n", i, pVCpu->hm.s.vmx.u32PinCtls));
     
    37563757VMMR3DECL(const char *) HMR3GetVmxExitName(uint32_t uExit)
    37573758{
    3758     if (uExit < RT_ELEMENTS(g_apszVTxExitReasons))
    3759         return g_apszVTxExitReasons[uExit];
     3759    if (uExit < RT_ELEMENTS(g_apszVmxExitReasons))
     3760        return g_apszVmxExitReasons[uExit];
    37603761    return NULL;
    37613762}
     
    37703771VMMR3DECL(const char *) HMR3GetSvmExitName(uint32_t uExit)
    37713772{
    3772     if (uExit < RT_ELEMENTS(g_apszAmdVExitReasons))
    3773         return g_apszAmdVExitReasons[uExit];
     3773    if (uExit < RT_ELEMENTS(g_apszSvmExitReasons))
     3774        return g_apszSvmExitReasons[uExit];
    37743775    return hmSvmGetSpecialExitReasonDesc(uExit);
    37753776}
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r73097 r73606  
    45494549    if (pVCpu->pgm.s.fA20Enabled != fEnable)
    45504550    {
     4551#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     4552        PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
     4553        if (   CPUMIsGuestInVmxRootMode(pCtx)
     4554            && !fEnable)
     4555        {
     4556            Log(("Cannot enter A20M mode while in VMX root mode\n"));
     4557            return;
     4558        }
     4559#endif
    45514560        pVCpu->pgm.s.fA20Enabled = fEnable;
    45524561        pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette