VirtualBox

Changeset 47652 in vbox


Timestamp:
Aug 9, 2013 2:56:17 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
87854
Message:

VMM: Removed all VBOX_WITH_OLD_[VTX|AMDV]_CODE bits.

Location:
trunk/src/VBox/VMM
Files:
12 edited
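
The change is a pure removal of the transitional build switches: everywhere the tree offered a legacy implementation behind VBOX_WITH_OLD_VTX_CODE / VBOX_WITH_OLD_AMDV_CODE, only the branch for the rewritten code survives. As an illustrative sketch of the pattern being deleted (the function names here are hypothetical, not VirtualBox source):

    /* Before: the legacy world-switch code was selectable at build time. */
    #ifdef VBOX_WITH_OLD_VTX_CODE
        rc = oldVmxRunGuestCode(pVM, pVCpu);   /* old HWVMXR0.cpp path */
    #else
        rc = newVmxRunGuestCode(pVM, pVCpu);   /* new HMVMXR0.cpp path */
    #endif

    /* After r47652: only the new path remains, unconditionally. */
    rc = newVmxRunGuestCode(pVM, pVCpu);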

Legend:

  ' ' unmodified
  '-' removed
  '+' added
  • trunk/src/VBox/VMM/Makefile.kmk (r47646 → r47652)

         VMMR0/HMR0.cpp \
         VMMR0/HMR0A.asm \
+        VMMR0/HMVMXR0.cpp \
+        VMMR0/HMSVMR0.cpp \
         VMMR0/PDMR0Device.cpp \
         VMMR0/PDMR0Driver.cpp \
…
         VMMAll/PDMAllNetShaper.cpp
 endif
-ifdef VBOX_WITH_OLD_VTX_CODE
- VMMR0_SOURCES += VMMR0/HWVMXR0.cpp
-else
- VMMR0_SOURCES += VMMR0/HMVMXR0.cpp
-endif
-ifdef VBOX_WITH_OLD_AMDV_CODE
- VMMR0_SOURCES += VMMR0/HWSVMR0.cpp
-else
- VMMR0_SOURCES += VMMR0/HMSVMR0.cpp
-endif
 VMMR0_SOURCES.amd64 = \
        VMMR0/VMMR0JmpA-amd64.asm
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r47328 → r47652)

 VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
 {
-#ifdef VBOX_WITH_OLD_VTX_CODE
-    return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
-        && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
-        && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
-#else
     return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
         && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
         && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LME);
-#endif
 }
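
The surviving CPUMIsGuestInPAEMode() drops the CR0.PE test and checks EFER.LME (long mode enabled) rather than EFER.LMA (long mode active). Restated as a standalone sketch (a hypothetical helper using the same constants, not the actual CPUM entry point):

    /* PAE mode: PAE enabled, paging on, and the guest is not a long-mode guest. */
    static bool isGuestInPaeMode(uint64_t cr0, uint64_t cr4, uint64_t efer)
    {
        return (cr4 & X86_CR4_PAE)
            && (cr0 & X86_CR0_PG)
            && !(efer & MSR_K6_EFER_LME);
    }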
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r47472 → r47652)

        guests, so we must make sure the recompiler flushes its TLB the next
        time it executes code. */
-    if (    pVM->hm.s.fNestedPaging
-#ifdef VBOX_WITH_OLD_VTX_CODE
-        &&  CPUMIsGuestInPagedProtectedModeEx(pCtx)
-#else
-        &&  CPUMIsGuestPagingEnabledEx(pCtx)
-#endif
-       )
+    if (   pVM->hm.s.fNestedPaging
+        && CPUMIsGuestPagingEnabledEx(pCtx))
     {
         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r47608 → r47652)

 %endif

-%ifndef VBOX_WITH_OLD_VTX_CODE
- %ifdef RT_ARCH_AMD64
-  %define VBOX_SKIP_RESTORE_SEG
- %endif
+%ifdef RT_ARCH_AMD64
+ %define VBOX_SKIP_RESTORE_SEG
 %endif
  • trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac (r47610 → r47652)

 ;

-%ifndef VBOX_WITH_OLD_VTX_CODE
- %ifdef RT_ARCH_AMD64
-  ;;
-  ; Keep these macro definitions in this file as it gets included and compiled
-  ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
-  %define VMX_SKIP_GDTR
-  %ifndef RT_OS_DARWIN
-   ; Darwin (Mavericks) uses IDTR limit to store the CPUID so we need to restore it always. See @bugref{6875}.
-   %define VMX_SKIP_IDTR
-  %endif
-  %define VMX_SKIP_TR
+%ifdef RT_ARCH_AMD64
+ ;;
+ ; Keep these macro definitions in this file as it gets included and compiled
+ ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
+ %define VMX_SKIP_GDTR
+ %ifndef RT_OS_DARWIN
+  ; Darwin (Mavericks) uses IDTR limit to store the CPUID so we need to restore it always. See @bugref{6875}.
+  %define VMX_SKIP_IDTR
  %endif
+ %define VMX_SKIP_TR
 %endif
…
     mov     [ss:xDI + CPUMCTX.esi], esi
     mov     [ss:xDI + CPUMCTX.ebp], ebp
-%ifndef VBOX_WITH_OLD_VTX_CODE
     mov     xAX, cr2
     mov     [ss:xDI + CPUMCTX.cr2], xAX
-%endif

 %ifdef RT_ARCH_AMD64
…
     jnz     .cached_read
 .no_cached_reads:
-
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; Restore CR2 into VMCS-cache field (for EPT).
-    mov     xAX, cr2
-    mov     [ss:xDX + VMCSCACHE.cr2], xAX
-%endif
 %endif
…
     LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%endif
 %endif
…
     mov     qword [xDI + CPUMCTX.r14], r14
     mov     qword [xDI + CPUMCTX.r15], r15
-%ifndef VBOX_WITH_OLD_VTX_CODE
     mov     rax, cr2
     mov     qword [xDI + CPUMCTX.cr2], rax
-%endif

     pop     xAX                                 ; The guest edi we pushed above
…
     LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
     LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%endif
 %endif
…
     jnz     .cached_read
 .no_cached_reads:
-
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; Restore CR2 into VMCS-cache field (for EPT).
-    mov     xAX, cr2
-    mov     [xDX + VMCSCACHE.cr2], xAX
-%endif
 %endif
…
     LOADHOSTMSR MSR_K6_STAR
     LOADHOSTMSR MSR_K8_LSTAR
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
-%endif
 %endif
…
     LOADHOSTMSR MSR_K6_STAR
     LOADHOSTMSR MSR_K8_LSTAR
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
-%endif
 %endif
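
Net effect in this file: guest CR2 is now always saved straight into CPUMCTX right after the world switch (VT-x does not auto-save CR2 in the VMCS), instead of being stashed in the VMCS read cache only when the old code was built, and the @bugref{6208} KERNEL_GS_BASE fallback for the legacy code is gone. In C terms the unconditional post-exit save amounts to roughly this sketch (saveGuestCr2 is hypothetical; ASMGetCR2 is the IPRT accessor):

    /* Sketch: capture the live CR2 into the guest context after a VM-exit. */
    static void saveGuestCr2(PCPUMCTX pCtx)
    {
        pCtx->cr2 = ASMGetCR2();
    }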
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r47514 → r47652)

     pCtx->dr[7] &= 0xffffffff;                                                /* Upper 32 bits MBZ. */
     pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));      /* MBZ. */
-    pCtx->dr[7] |= 0x400;                                                     /* MB1. */
+    pCtx->dr[7] |= X86_DR7_INIT_VAL;                                          /* MB1. */

     /* Update DR6, DR7 with the guest values. */
…
     /* Clear any unused and reserved bits. */
-    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_MSR                /* Unused (legacy). */
-                                      | HM_CHANGED_GUEST_RIP                /* Unused (loaded unconditionally). */
+    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_RIP                /* Unused (loaded unconditionally). */
                                       | HM_CHANGED_GUEST_RSP
                                       | HM_CHANGED_GUEST_RFLAGS
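
The DR7 hunk replaces the magic constant 0x400 with X86_DR7_INIT_VAL, naming the must-be-one bit 10. The whole sanitization reads as follows (a sketch on a plain value rather than pCtx->dr[7]):

    /* Sketch: sanitize a guest DR7 value; bits 11/12/14/15 and the upper half
       are MBZ, bit 10 (X86_DR7_INIT_VAL) is MB1. */
    static uint64_t sanitizeDr7(uint64_t uDr7)
    {
        uDr7 &= UINT64_C(0xffffffff);                                  /* Upper 32 bits MBZ. */
        uDr7 &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));  /* MBZ. */
        uDr7 |= X86_DR7_INIT_VAL;                                      /* MB1. */
        return uDr7;
    }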
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r47643 → r47652)

     /* Clear any unused and reserved bits. */
-    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_CR2
-                                      | HM_CHANGED_GUEST_MSR  /* legacy */);
+    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;

     AssertMsg(!pVCpu->hm.s.fContextUseFlags,
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h (r47473 → r47652)

 #endif

-#ifdef VBOX_WITH_OLD_VTX_CODE
-# ifdef VMX_USE_CACHED_VMCS_ACCESSES
-#  define VMXReadCachedVmcs(idxField, pVal)              VMXReadCachedVmcsEx(pVCpu, idxField##_CACHE_IDX, pVal)
-# else
-#  define VMXReadCachedVmcs                              VMXReadVmcsField
-# endif
-#  define VMXReadVmcs                                    VMXReadVmcsField
-#else /* !VBOX_WITH_OLD_VTX_CODE */
 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
 #  define VMXReadVmcsHstN(idxField, p64Val)               HMVMX_IS_64BIT_HOST_MODE() ?                      \
…
 #  define VMXReadVmcsGstNByIdxVal                         VMXReadVmcs64
 # endif
-#endif  /* !VBOX_WITH_OLD_VTX_CODE */

 #endif /* IN_RING0 */
  • trunk/src/VBox/VMM/VMMR3/HM.cpp (r47635 → r47652)

     RTGCPHYS    GCPhys = 0;

-#ifndef VBOX_WITH_OLD_VTX_CODE
     LogRel(("HM: Using VT-x implementation 2.0!\n"));
-#endif
     LogRel(("HM: Host CR4                      = %#RX64\n", pVM->hm.s.vmx.hostCR4));
     LogRel(("HM: MSR_IA32_FEATURE_CONTROL      = %#RX64\n", pVM->hm.s.vmx.msr.feature_ctrl));
…
         LogRel(("HM: Ignoring VPID capabilities of CPU.\n"));

-    /** TPR patching would never have worked on Intel. Leaving it here for the old
-     *  code's sake. See @bugref{6398}. */
-#ifdef VBOX_WITH_OLD_VTX_CODE
-    /*
-     * TPR patching status logging.
-     */
-    if (pVM->hm.s.fTRPPatchingAllowed)
-    {
-        if (    (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
-            &&  (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
-        {
-            pVM->hm.s.fTRPPatchingAllowed = false;  /* not necessary as we have a hardware solution. */
-            LogRel(("HM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
-        }
-        else
-        {
-            uint32_t u32Eax, u32Dummy;
-
-            /* TPR patching needs access to the MSR_K8_LSTAR msr. */
-            ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
-            if (    u32Eax < 0x80000001
-                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
-            {
-                pVM->hm.s.fTRPPatchingAllowed = false;
-                LogRel(("HM: TPR patching disabled (long mode not supported).\n"));
-            }
-        }
-    }
-    LogRel(("HM: TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
-#endif
-
-
     /*
      * Check for preemption timer config override and log the state of it.
…
     Log(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));

-#ifndef VBOX_WITH_OLD_AMDV_CODE
     LogRel(("HM: Using AMD-V implementation 2.0!\n"));
-#endif

     uint32_t u32Family;
…
         {
             PVMCPU pVCpu = &pVM->aCpus[i];
-
-            pVCpu->hm.s.enmShadowMode            = PGMGetShadowMode(pVCpu);
-#ifdef VBOX_WITH_OLD_VTX_CODE
-            Assert(pVCpu->hm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
-            pVCpu->hm.s.vmx.enmCurrGuestMode     = PGMGetGuestMode(pVCpu);
-#endif
+            pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
         }
     }
…
     pVCpu->hm.s.enmShadowMode = enmShadowMode;

-#ifdef VBOX_WITH_OLD_VTX_CODE
-    if (   pVM->hm.s.vmx.fEnabled
-        && HMIsEnabled(pVM))
-    {
-        if (    pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
-            &&  enmGuestMode >= PGMMODE_PROTECTED)
-        {
-            PCPUMCTX pCtx;
-
-            pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-
-            /* After a real mode switch to protected mode we must force
-               CPL to 0. Our real mode emulation had to set it to 3. */
-            pCtx->ss.Attr.n.u2Dpl  = 0;
-        }
-    }
-
-    if (pVCpu->hm.s.vmx.enmCurrGuestMode != enmGuestMode)
-    {
-        /* Keep track of paging mode changes. */
-        pVCpu->hm.s.vmx.enmPrevGuestMode = pVCpu->hm.s.vmx.enmCurrGuestMode;
-        pVCpu->hm.s.vmx.enmCurrGuestMode = enmGuestMode;
-
-        /* Did we miss a change, because all code was executed in the recompiler? */
-        if (pVCpu->hm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
-        {
-            Log(("HMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hm.s.vmx.enmPrevGuestMode),
-                 PGMGetModeName(pVCpu->hm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hm.s.vmx.enmLastSeenGuestMode)));
-            pVCpu->hm.s.vmx.enmLastSeenGuestMode = pVCpu->hm.s.vmx.enmPrevGuestMode;
-        }
-    }
-#else
-    /* If the guest left protected mode VMX execution, we'll have to be extra
-     * careful if/when the guest switches back to protected mode.
+    /*
+     * If the guest left protected mode VMX execution, we'll have to be
+     * extra careful if/when the guest switches back to protected mode.
      */
     if (enmGuestMode == PGMMODE_REAL)
…
         pVCpu->hm.s.vmx.fWasInRealMode = true;
     }
-#endif
-
-    /** @todo r=ramshankar: Why do we need to do this? */
+
+    /** @todo r=ramshankar: Disabling for now. If nothing breaks remove it
+     *        eventually. (Test platforms that use the cache ofc). */
+#if 0
 #ifdef VMX_USE_CACHED_VMCS_ACCESSES
     /* Reset the contents of the read cache. */
…
         pCache->Read.aFieldVal[j] = 0;
 #endif
+#endif
 }
…
     pVCpu->hm.s.fContextUseFlags = (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST);

-    pVCpu->hm.s.vmx.u32CR0Mask = 0;
-    pVCpu->hm.s.vmx.u32CR4Mask = 0;
-
-    pVCpu->hm.s.fActive        = false;
-    pVCpu->hm.s.Event.fPending = false;
-
-#ifdef VBOX_WITH_OLD_VTX_CODE
-    /* Reset state information for real-mode emulation in VT-x. */
-    pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
-    pVCpu->hm.s.vmx.enmPrevGuestMode     = PGMMODE_REAL;
-    pVCpu->hm.s.vmx.enmCurrGuestMode     = PGMMODE_REAL;
-#else
+    pVCpu->hm.s.vmx.u32CR0Mask     = 0;
+    pVCpu->hm.s.vmx.u32CR4Mask     = 0;
+    pVCpu->hm.s.fActive            = false;
+    pVCpu->hm.s.Event.fPending     = false;
     pVCpu->hm.s.vmx.fWasInRealMode = true;
-#endif

     /* Reset the contents of the read cache. */
…
                 /* Verify the requirements for executing code in protected
                    mode. VT-x can't handle the CPU state right after a switch
-                   from real to protected mode. (all sorts of RPL & DPL assumptions) */
-#if VBOX_WITH_OLD_VTX_CODE
-                if (    pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
-                    &&  enmGuestMode >= PGMMODE_PROTECTED)
-#else
+                   from real to protected mode. (all sorts of RPL & DPL assumptions). */
                 if (pVCpu->hm.s.vmx.fWasInRealMode)
-#endif
                 {
-                    //@todo: If guest is in V86 mode, these checks should be different!
-#if VBOX_WITH_OLD_VTX_CODE
-                    if (   (pCtx->cs.Sel & X86_SEL_RPL)
-                        || (pCtx->ds.Sel & X86_SEL_RPL)
-                        || (pCtx->es.Sel & X86_SEL_RPL)
-                        || (pCtx->fs.Sel & X86_SEL_RPL)
-                        || (pCtx->gs.Sel & X86_SEL_RPL)
-                        || (pCtx->ss.Sel & X86_SEL_RPL))
-                    {
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
-                        return false;
-                    }
-#else
+                    /** @todo If guest is in V86 mode, these checks should be different! */
                     if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                     {
…
                         return false;
                     }
-#endif
                 }
-                /* VT-x also chokes on invalid tr or ldtr selectors (minix) */
+                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                 if (pCtx->gdtr.cbGdt)
                 {
…
                 &&  !pVM->hm.s.vmx.fUnrestrictedGuest)
             {
-#ifdef VBOX_WITH_OLD_VTX_CODE
-                /** @todo   This should (probably) be set on every excursion to the REM,
-                 *          however it's too risky right now. So, only apply it when we go
-                 *          back to REM for real mode execution. (The XP hack below doesn't
-                 *          work reliably without this.)
-                 *  Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HM.  */
-                for (uint32_t i = 0; i < pVM->cCpus; i++)
-                    pVM->aCpus[i].hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
-#endif
-
-                if (    !pVM->hm.s.fNestedPaging        /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
-                    ||  CPUMIsGuestInRealModeEx(pCtx))  /* requires a fake TSS for real mode - stored in the VMM device heap */
+                if (    !pVM->hm.s.fNestedPaging        /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
+                    ||  CPUMIsGuestInRealModeEx(pCtx))  /* Requires a fake TSS for real mode - stored in the VMM device heap */
                     return false;
…
                 if (pCtx->rsp >= pCtx->ss.u32Limit)
                     return false;
-#if 0
-                if (    pCtx->cs.Sel >= pCtx->gdtr.cbGdt
-                    ||  pCtx->ss.Sel >= pCtx->gdtr.cbGdt
-                    ||  pCtx->ds.Sel >= pCtx->gdtr.cbGdt
-                    ||  pCtx->es.Sel >= pCtx->gdtr.cbGdt
-                    ||  pCtx->fs.Sel >= pCtx->gdtr.cbGdt
-                    ||  pCtx->gs.Sel >= pCtx->gdtr.cbGdt)
-                    return false;
-#endif
             }
         }
…
      * when the unrestricted guest execution feature is missing (VT-x only).
      */
-#ifdef VBOX_WITH_OLD_VTX_CODE
-    if (   pVM->hm.s.vmx.fEnabled
-        && !pVM->hm.s.vmx.fUnrestrictedGuest
-        && !CPUMIsGuestInPagedProtectedModeEx(pCtx)
-        && !PDMVmmDevHeapIsEnabled(pVM)
-        && (pVM->hm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
-        return true;
-#else
     if (   pVM->hm.s.vmx.fEnabled
         && !pVM->hm.s.vmx.fUnrestrictedGuest
         && CPUMIsGuestInRealModeEx(pCtx)
         && !PDMVmmDevHeapIsEnabled(pVM))
+    {
         return true;
-#endif
+    }

     return false;
…
         AssertRCReturn(rc, rc);

-#ifdef VBOX_WITH_OLD_VTX_CODE
-        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode);
-        AssertRCReturn(rc, rc);
-#else
-        //@todo: We only need to save pVM->aCpus[i].hm.s.vmx.fWasInRealMode and
-        // perhaps not even that (the initial value of 'true' is safe).
+        /** @todo We only need to save pVM->aCpus[i].hm.s.vmx.fWasInRealMode and
+         *        perhaps not even that (the initial value of @c true is safe. */
         uint32_t u32Dummy = PGMMODE_REAL;
         rc = SSMR3PutU32(pSSM, u32Dummy);
…
         rc = SSMR3PutU32(pSSM, u32Dummy);
         AssertRCReturn(rc, rc);
-#endif
-    }
+    }
+
 #ifdef VBOX_HM_WITH_GUEST_PATCHING
     rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
…
         {
             uint32_t val;
-
-#ifdef VBOX_WITH_OLD_VTX_CODE
-            rc = SSMR3GetU32(pSSM, &val);
-            AssertRCReturn(rc, rc);
-            pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
-
-            rc = SSMR3GetU32(pSSM, &val);
-            AssertRCReturn(rc, rc);
-            pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
-
-            rc = SSMR3GetU32(pSSM, &val);
-            AssertRCReturn(rc, rc);
-            pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
-#else
-            //@todo: See note above re saving enmLastSeenGuestMode
+            /** @todo See note in hmR3Save(). */
             rc = SSMR3GetU32(pSSM, &val);
             AssertRCReturn(rc, rc);
…
             rc = SSMR3GetU32(pSSM, &val);
             AssertRCReturn(rc, rc);
-#endif
         }
     }
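
Worth noting in the saved-state hunks: the three per-CPU PGMMODE fields (enmLastSeenGuestMode, enmCurrGuestMode, enmPrevGuestMode) no longer exist, yet the save path still writes placeholder words and the load path still reads and discards them, so the saved-state layout stays compatible with older streams. A sketch of the pattern (assuming, per the removed code, that three U32 slots are kept):

    /* Sketch: keep the stream layout stable after removing the fields. */
    uint32_t u32Dummy = PGMMODE_REAL;
    rc = SSMR3PutU32(pSSM, u32Dummy);   /* was enmLastSeenGuestMode */
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, u32Dummy);   /* was enmCurrGuestMode */
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, u32Dummy);   /* was enmPrevGuestMode */
    AssertRCReturn(rc, rc);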
  • trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac (r46099 → r47652)

     ; Manual save and restore:
     ;  - General purpose registers except RIP, RSP
-    ; 
+    ;
     ; Trashed:
     ;  - CR2 (we don't care)
…
     LOADGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%endif
 %endif
…
     mov     qword [rdi + CPUMCTX.r14], r14
     mov     qword [rdi + CPUMCTX.r15], r15
-%ifndef VBOX_WITH_OLD_VTX_CODE
     mov     rax, cr2
     mov     qword [rdi + CPUMCTX.cr2], rax
-%endif

     pop     rax         ; The guest edi we pushed above
…
     SAVEGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
     SAVEGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%endif
 %endif
…
     jnz     .cached_read
 .no_cached_reads:
-
- %ifdef VBOX_WITH_OLD_VTX_CODE
-    ; Restore CR2 into VMCS-cache field (for EPT).
-    mov     rax, cr2
-    mov     [rdi + VMCSCACHE.cr2], rax
- %endif
 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     mov     dword [rdi + VMCSCACHE.uPos], 8
…
     ; Manual save and restore:
     ;  - General purpose registers except RIP, RSP, RAX
-    ; 
+    ;
     ; Trashed:
     ;  - CR2 (we don't care)
  • trunk/src/VBox/VMM/include/HMInternal.h (r47619 → r47652)

 #endif

-#ifdef VBOX_WITH_OLD_VTX_CODE
-# define VMX_USE_CACHED_VMCS_ACCESSES
-#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
 # define VMX_USE_CACHED_VMCS_ACCESSES
 #endif
…
 #define HM_CHANGED_GUEST_LDTR                    RT_BIT(5)
 #define HM_CHANGED_GUEST_TR                      RT_BIT(6)
-#define HM_CHANGED_GUEST_MSR                     RT_BIT(7)  /* Unused in new VT-x, AMD-V code. */
-#define HM_CHANGED_GUEST_SEGMENT_REGS            RT_BIT(8)
-#define HM_CHANGED_GUEST_DEBUG                   RT_BIT(9)
-#define HM_CHANGED_ALL_GUEST_BASE                (  HM_CHANGED_GUEST_CR0          \
-                                                  | HM_CHANGED_GUEST_CR3          \
-                                                  | HM_CHANGED_GUEST_CR4          \
-                                                  | HM_CHANGED_GUEST_GDTR         \
-                                                  | HM_CHANGED_GUEST_IDTR         \
-                                                  | HM_CHANGED_GUEST_LDTR         \
-                                                  | HM_CHANGED_GUEST_TR           \
-                                                  | HM_CHANGED_GUEST_MSR          \
-                                                  | HM_CHANGED_GUEST_SEGMENT_REGS \
-                                                  | HM_CHANGED_GUEST_DEBUG)
-#define HM_CHANGED_ALL_GUEST                     HM_CHANGED_ALL_GUEST_BASE
-
-/** New VT-x, AMD-V code uses extra flags for more fine-grained state
- *  tracking. */
-#if !defined(VBOX_WITH_OLD_VTX_CODE) || !defined(VBOX_WITH_OLD_AMDV_CODE)
-# define HM_CHANGED_GUEST_RIP                    RT_BIT(10)
-# define HM_CHANGED_GUEST_RSP                    RT_BIT(11)
-# define HM_CHANGED_GUEST_RFLAGS                 RT_BIT(12)
-# define HM_CHANGED_GUEST_CR2                    RT_BIT(13)
-# define HM_CHANGED_GUEST_SYSENTER_CS_MSR        RT_BIT(14)
-# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR       RT_BIT(15)
-# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR       RT_BIT(16)
+#define HM_CHANGED_GUEST_SEGMENT_REGS            RT_BIT(7)
+#define HM_CHANGED_GUEST_DEBUG                   RT_BIT(8)
+# define HM_CHANGED_GUEST_RIP                    RT_BIT(9)
+# define HM_CHANGED_GUEST_RSP                    RT_BIT(10)
+# define HM_CHANGED_GUEST_RFLAGS                 RT_BIT(11)
+# define HM_CHANGED_GUEST_CR2                    RT_BIT(12)
+# define HM_CHANGED_GUEST_SYSENTER_CS_MSR        RT_BIT(13)
+# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR       RT_BIT(14)
+# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR       RT_BIT(15)
 /* VT-x specific state. */
-# define HM_CHANGED_VMX_GUEST_AUTO_MSRS          RT_BIT(17)
-# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE     RT_BIT(18)
-# define HM_CHANGED_VMX_GUEST_APIC_STATE         RT_BIT(19)
-# define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(20)
-# define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(21)
+# define HM_CHANGED_VMX_GUEST_AUTO_MSRS          RT_BIT(16)
+# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE     RT_BIT(17)
+# define HM_CHANGED_VMX_GUEST_APIC_STATE         RT_BIT(18)
+# define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(19)
+# define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(20)
 /* AMD-V specific state. */
-# define HM_CHANGED_SVM_GUEST_EFER_MSR           RT_BIT(17)
-# define HM_CHANGED_SVM_GUEST_APIC_STATE         RT_BIT(18)
-# define HM_CHANGED_SVM_RESERVED1                RT_BIT(19)
-# define HM_CHANGED_SVM_RESERVED2                RT_BIT(20)
-# define HM_CHANGED_SVM_RESERVED3                RT_BIT(21)
-
-# undef  HM_CHANGED_ALL_GUEST
-# define HM_CHANGED_ALL_GUEST                   (  HM_CHANGED_ALL_GUEST_BASE           \
+# define HM_CHANGED_SVM_GUEST_EFER_MSR           RT_BIT(16)
+# define HM_CHANGED_SVM_GUEST_APIC_STATE         RT_BIT(17)
+# define HM_CHANGED_SVM_RESERVED1                RT_BIT(18)
+# define HM_CHANGED_SVM_RESERVED2                RT_BIT(19)
+# define HM_CHANGED_SVM_RESERVED3                RT_BIT(20)
+
+# define HM_CHANGED_ALL_GUEST                   (  HM_CHANGED_GUEST_CR0                \
+                                                 | HM_CHANGED_GUEST_CR3                \
+                                                 | HM_CHANGED_GUEST_CR4                \
+                                                 | HM_CHANGED_GUEST_GDTR               \
+                                                 | HM_CHANGED_GUEST_IDTR               \
+                                                 | HM_CHANGED_GUEST_LDTR               \
+                                                 | HM_CHANGED_GUEST_TR                 \
+                                                 | HM_CHANGED_GUEST_SEGMENT_REGS       \
+                                                 | HM_CHANGED_GUEST_DEBUG              \
                                                  | HM_CHANGED_GUEST_RIP                \
                                                  | HM_CHANGED_GUEST_RSP                \
…
                                                  | HM_CHANGED_VMX_ENTRY_CTLS           \
                                                  | HM_CHANGED_VMX_EXIT_CTLS)
-#endif
-
-#define HM_CHANGED_HOST_CONTEXT                 RT_BIT(22)
+
+#define HM_CHANGED_HOST_CONTEXT                 RT_BIT(21)
 /** @} */
…
 #endif

-#ifndef VBOX_WITH_OLD_VTX_CODE
+        /** Internal Id of which flush-handler to use for tagged-TLB entries. */
         unsigned                    uFlushTaggedTlb;
-#else
-        /** Ring 0 handlers for VT-x. */
-        DECLR0CALLBACKMEMBER(void, pfnFlushTaggedTlb, (PVM pVM, PVMCPU pVCpu));
-#endif

 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
…
         } LastError;

-#ifdef VBOX_WITH_OLD_VTX_CODE
-        /** The last seen guest paging mode (by VT-x). */
-        PGMMODE                     enmLastSeenGuestMode;
-        /** Current guest paging mode (as seen by HMR3PagingModeChanged). */
-        PGMMODE                     enmCurrGuestMode;
-        /** Previous guest paging mode (as seen by HMR3PagingModeChanged). */
-        PGMMODE                     enmPrevGuestMode;
-#else
         /** Which host-state bits to restore before being preempted. */
         uint32_t                    fRestoreHostFlags;
…
         /** Set if guest was executing in real mode (extra checks). */
         bool                        fWasInRealMode;
-#endif
     } vmx;
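
With HM_CHANGED_GUEST_MSR (old bit 7) removed, every later flag shifts down one bit, HM_CHANGED_ALL_GUEST is spelled out directly instead of via HM_CHANGED_ALL_GUEST_BASE, and HM_CHANGED_HOST_CONTEXT moves from bit 22 to bit 21. The VT-x and AMD-V specific groups share bits 16-20, which is safe since a VM only ever runs on one of the two engines. A compile-time sketch of those invariants (using IPRT's AssertCompile; these asserts are illustrative, not part of the changeset):

    AssertCompile(HM_CHANGED_VMX_GUEST_AUTO_MSRS == HM_CHANGED_SVM_GUEST_EFER_MSR); /* both RT_BIT(16) */
    AssertCompile(HM_CHANGED_VMX_EXIT_CTLS      == HM_CHANGED_SVM_RESERVED3);      /* both RT_BIT(20) */
    AssertCompile(HM_CHANGED_HOST_CONTEXT       == RT_BIT(21));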
  • trunk/src/VBox/VMM/include/HMInternal.mac (r46038 → r47652)

 ;

-%ifdef VBOX_WITH_OLD_VTX_CODE
-%define VMX_USE_CACHED_VMCS_ACCESSES
-%else
 %if HC_ARCH_BITS == 32
-%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
-%define VMX_USE_CACHED_VMCS_ACCESSES
-%endif  ; VBOX_WITH_HYBRID_32BIT_KERNEL
-%endif   ; HC_ARCH_BITS == 32
-%endif    ; VBOX_WITH_OLD_VTX_CODE
+ %ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
+  %define VMX_USE_CACHED_VMCS_ACCESSES
+ %endif
+%endif

 %define VBOX_WITH_AUTO_MSR_LOAD_RESTORE