VirtualBox

Changeset 42024 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Jul 5, 2012 12:10:53 PM (13 years ago)
Author:
vboxsync
Message:

VMM: RDTSCP support on Intel. Segregated some common CPU features from the AMD superset into Extended features as they're now available on Intel too.

Location:
trunk/src/VBox/VMM
Files:
11 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r41976 r42024  
    10421042
    10431043            /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
    1044             if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_NX)
     1044            if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)
    10451045                fMask |= MSR_K6_EFER_NXE;
    1046             if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
     1046            if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
    10471047                fMask |= MSR_K6_EFER_LME;
    1048             if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_SEP)
     1048            if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
    10491049                fMask |= MSR_K6_EFER_SCE;
    10501050            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
     
    15841584        {
    15851585            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    1586                 ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
     1586                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
    15871587            {
    15881588#if HC_ARCH_BITS == 32
    1589                 /* X86_CPUID_AMD_FEATURE_EDX_SEP not set it seems in 32 bits mode.
     1589                /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL not set it seems in 32 bits mode.
    15901590                 * Even when the cpu is capable of doing so in 64 bits mode.
    15911591                 */
    15921592                if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    1593                     ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
    1594                     ||  !(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
     1593                    ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
     1594                    ||  !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
    15951595#endif
    15961596                {
     
    16001600            }
    16011601            /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
    1602             pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
     1602            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
    16031603            LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n"));
    16041604            break;
     
    16331633        {
    16341634            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    1635                 ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
     1635                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
    16361636            {
    16371637                LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
     
    16401640
    16411641            /* Valid for both Intel and AMD. */
    1642             pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
     1642            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
    16431643            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
    16441644            break;
     
    16461646
    16471647        /*
    1648          * Set the NXE bit in the extended feature mask.
     1648         * Set the NX/XD bit in the extended feature mask.
    16491649         * Assumes the caller knows what it's doing! (host must support these)
    16501650         */
    1651         case CPUMCPUIDFEATURE_NXE:
     1651        case CPUMCPUIDFEATURE_NX:
    16521652        {
    16531653            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    1654                 ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
     1654                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
    16551655            {
    1656                 LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
     1656                LogRel(("WARNING: Can't turn on NX/XD when the host doesn't support it!!\n"));
    16571657                return;
    16581658            }
    16591659
    16601660            /* Valid for both Intel and AMD. */
    1661             pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
    1662             LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
    1663             break;
    1664         }
    1665 
     1661            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX;
     1662            LogRel(("CPUMSetGuestCpuIdFeature: Enabled NX\n"));
     1663            break;
     1664        }
     1665
     1666        /*
     1667         * Set the LAHF/SAHF support in 64-bit mode.
     1668         * Assumes the caller knows what it's doing! (host must support this)
     1669         */
    16661670        case CPUMCPUIDFEATURE_LAHF:
    16671671        {
    16681672            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    1669                 ||  !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
     1673                ||  !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
    16701674            {
    16711675                LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
     
    16731677            }
    16741678
    1675             pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
     1679            /* Valid for both Intel and AMD. */
     1680            pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
    16761681            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
    16771682            break;
     
    16891694        }
    16901695
     1696        /*
     1697         * Set the RDTSCP support bit.
     1698         * Assumes the caller knows what it's doing! (host must support this)
     1699         */
    16911700        case CPUMCPUIDFEATURE_RDTSCP:
    16921701        {
    16931702            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    1694                 ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_RDTSCP)
     1703                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
    16951704                ||  pVM->cpum.s.u8PortableCpuIdLevel > 0)
    16961705            {
     
    17001709            }
    17011710
    1702             /* Valid for AMD only (for now). */
    1703             pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_RDTSCP;
     1711            /* Valid for both Intel and AMD. */
     1712            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
    17041713            LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n"));
    17051714            break;
     
    17451754        }
    17461755
    1747         case CPUMCPUIDFEATURE_NXE:
     1756        case CPUMCPUIDFEATURE_NX:
    17481757        {
    17491758            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    1750                 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_NX);
     1759                return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX);
    17511760        }
    17521761
     
    17541763        {
    17551764            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    1756                 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
     1765                return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
    17571766            break;
    17581767        }
     
    17611770        {
    17621771            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    1763                 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
     1772                return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
    17641773            break;
    17651774        }
     
    18291838        {
    18301839            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    1831                 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
     1840                pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
    18321841            break;
    18331842        }
     
    18361845        {
    18371846            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    1838                 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
     1847                pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
    18391848            break;
    18401849        }
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h

    r40266 r42024  
    982982{
    983983    /* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */
    984     if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_AMD_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW,
     984    if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_EXT_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW,
    985985                                               X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF))
    986986    {
     
    86788678    IEMOP_HLP_NO_LOCK_PREFIX();
    86798679    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
    8680         && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
     8680        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
    86818681        return IEMOP_RAISE_INVALID_OPCODE();
    86828682    IEM_MC_BEGIN(0, 2);
     
    87028702    IEMOP_HLP_NO_LOCK_PREFIX();
    87038703    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
    8704         && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
     8704        && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
    87058705        return IEMOP_RAISE_INVALID_OPCODE();
    87068706    IEM_MC_BEGIN(0, 1);
  • trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp

    r41965 r42024  
    215215            break;
    216216        case 0x11: case 0x08:
    217             AssertMsg(uErrorCode == 0,              ("Invalid uErrorCode=%#x u8TrapNo=%d\n", uErrorCode, pVCpu->trpm.s.uActiveVector));
     217            AssertMsg(uErrorCode == 0,            ("Invalid uErrorCode=%#x u8TrapNo=%d\n", uErrorCode, pVCpu->trpm.s.uActiveVector));
    218218            break;
    219219        default:
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r41965 r42024  
    186186        {
    187187            uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
    188             if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_SEP)
     188            if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
    189189            {
    190190#ifdef RT_ARCH_X86
    191191# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    192                 if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
     192                if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
    193193# else
    194194                if (!ASMIsIntelCpu())
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r41965 r42024  
    22192219    {
    22202220        Log2(("SVM: Rdtscp\n"));
    2221         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtsc);
     2221        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtscp);
    22222222        rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
    22232223        if (rc == VINF_SUCCESS)
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r41965 r42024  
    568568            if (pVM->hwaccm.s.vmx.fUnrestrictedGuest)
    569569                val |= VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE;
     570
     571            if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     572                val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;
    570573
    571574            /* Mask away the bits that the CPU doesn't support */
     
    13161319         * Check if EFER MSR present.
    13171320         */
    1318         if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_AMD_FEATURE_EDX_NX|X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
    1319         {
    1320             if (ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP)
     1321        if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
     1322        {
     1323            if (ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
    13211324            {
    13221325                pMsr->u32IndexMSR = MSR_K6_STAR;
    13231326                pMsr->u32Reserved = 0;
    1324                 pMsr->u64Value    = ASMRdMsr(MSR_K6_STAR);                   /* legacy syscall eip, cs & ss */
     1327                pMsr->u64Value    = ASMRdMsr(MSR_K6_STAR);          /* legacy syscall eip, cs & ss */
    13251328                pMsr++; idxMsr++;
    13261329            }
     
    20982101    CPUMGetGuestCpuId(pVCpu, 0x80000001, &ulTemp, &ulTemp, &ulTemp, &ulEdx);
    20992102    /* EFER MSR present? */
    2100     if (ulEdx & (X86_CPUID_AMD_FEATURE_EDX_NX|X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
     2103    if (ulEdx & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
    21012104    {
    21022105        pMsr->u32IndexMSR = MSR_K6_EFER;
     
    21052108        /* VT-x will complain if only MSR_K6_EFER_LME is set. */
    21062109        if (!CPUMIsGuestInLongModeEx(pCtx))
    2107             pMsr->u64Value &= ~(MSR_K6_EFER_LMA|MSR_K6_EFER_LME);
     2110            pMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
    21082111        pMsr++; idxMsr++;
    21092112
    2110         if (ulEdx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
     2113        if (ulEdx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
    21112114        {
    21122115            pMsr->u32IndexMSR = MSR_K8_LSTAR;
     
    21612164        if (u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
    21622165        {
    2163             /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET */
     2166            /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
    21642167            rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->hwaccm.s.vmx.u64TSCOffset);
    21652168            AssertRC(rc);
     
    21722175        else
    21732176        {
    2174             /* Fall back to rdtsc emulation as we would otherwise pass decreasing tsc values to the guest. */
     2177            /* Fall back to rdtsc, rdtscp emulation as we would otherwise pass decreasing tsc values to the guest. */
    21752178            LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC,
    21762179                     pVCpu->hwaccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset,
     
    23322335        switch (pMsr->u32IndexMSR)
    23332336        {
    2334         case MSR_K8_LSTAR:
    2335             pCtx->msrLSTAR = pMsr->u64Value;
    2336             break;
    2337         case MSR_K6_STAR:
    2338             pCtx->msrSTAR = pMsr->u64Value;
    2339             break;
    2340         case MSR_K8_SF_MASK:
    2341             pCtx->msrSFMASK = pMsr->u64Value;
    2342             break;
    2343         case MSR_K8_KERNEL_GS_BASE:
    2344             pCtx->msrKERNELGSBASE = pMsr->u64Value;
    2345             break;
    2346         case MSR_K6_EFER:
    2347             /* EFER can't be changed without causing a VM-exit. */
    2348             /* Assert(pCtx->msrEFER == pMsr->u64Value); */
    2349             break;
    2350         default:
    2351             AssertFailed();
    2352             return VERR_HM_UNEXPECTED_LD_ST_MSR;
     2337            case MSR_K8_LSTAR:
     2338                pCtx->msrLSTAR = pMsr->u64Value;
     2339                break;
     2340            case MSR_K6_STAR:
     2341                pCtx->msrSTAR = pMsr->u64Value;
     2342                break;
     2343            case MSR_K8_SF_MASK:
     2344                pCtx->msrSFMASK = pMsr->u64Value;
     2345                break;
     2346            case MSR_K8_KERNEL_GS_BASE:
     2347                pCtx->msrKERNELGSBASE = pMsr->u64Value;
     2348                break;
     2349            case MSR_K6_EFER:
     2350                /* EFER can't be changed without causing a VM-exit. */
     2351                /* Assert(pCtx->msrEFER == pMsr->u64Value); */
     2352                break;
     2353            default:
     2354                AssertFailed();
     2355                return VERR_HM_UNEXPECTED_LD_ST_MSR;
    23532356        }
    23542357    }
     
    24532456    else
    24542457    {
    2455         AssertMsg(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID, ("hwaccm uCurrentASID=%lu cpu uCurrentASID=%lu\n",
    2456                                                                        pVCpu->hwaccm.s.uCurrentASID, pCpu->uCurrentASID));
     2458        AssertMsg(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID,
     2459                  ("hwaccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
     2460                   pVCpu->hwaccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,
     2461                   pCpu->uCurrentASID, pCpu->cTLBFlushes));
    24572462
    24582463        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
     
    40464051            /* Update EIP and continue execution. */
    40474052            Assert(cbInstr == 2);
     4053            pCtx->rip += cbInstr;
     4054            goto ResumeExecution;
     4055        }
     4056        rc = VINF_EM_RAW_EMULATE_INSTR;
     4057        break;
     4058    }
     4059
     4060    case VMX_EXIT_RDTSCP:                /* 51 Guest software attempted to execute RDTSCP. */
     4061    {
     4062        Log2(("VMX: Rdtscp\n"));
     4063        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtscp);
     4064        rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
     4065        if (rc == VINF_SUCCESS)
     4066        {
     4067            /* Update EIP and continue execution. */
     4068            Assert(cbInstr == 3);
    40484069            pCtx->rip += cbInstr;
    40494070            goto ResumeExecution;
     
    46264647    case VMX_EXIT_PORT_IO:              /* 30 I/O instruction. */
    46274648    case VMX_EXIT_RDPMC:                /* 15 Guest software attempted to execute RDPMC. */
     4649    case VMX_EXIT_RDTSCP:               /* 51 Guest software attempted to execute RDTSCP. */
    46284650        /* already handled above */
    46294651        AssertMsg(   rc == VINF_PGM_CHANGE_MODE
  • trunk/src/VBox/VMM/VMMR0/TRPMR0.cpp

    r41965 r42024  
    6262     * Check if we're in long mode or not.
    6363     */
    64     if (    (ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
     64    if (    (ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
    6565        &&  (ASMRdMsr(MSR_K6_EFER) & MSR_K6_EFER_LMA))
    6666    {
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r41965 r42024  
    684684     */
    685685    /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
    686      * Overrides the host CPUID leaf values used for calculating the guest CPUID
    687      * leaves.  This can be used to preserve the CPUID values when moving a VM
    688      * to a different machine.  Another use is restricting (or extending) the
    689      * feature set exposed to the guest. */
     686     * Loads the host CPUID leaves to the guest copy. Overrides, if any, the host
     687     * CPUID leaf values used for calculating the guest CPUID leaves.  This can be
     688     * used to preserve the CPUID values when moving a VM to a different machine.
     689     * Another use is restricting (or extending) the feature set exposed to the
     690     * guest. */
    690691    PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID");
    691692    rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     pHostOverrideCfg);
     
    825826                                  //| X86_CPUID_AMD_FEATURE_EDX_APIC   - set by the APIC device if present.
    826827                                  /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */
    827                                   //| X86_CPUID_AMD_FEATURE_EDX_SEP
     828                                  //| X86_CPUID_EXT_FEATURE_EDX_SEP
    828829                                  | X86_CPUID_AMD_FEATURE_EDX_MTRR
    829830                                  | X86_CPUID_AMD_FEATURE_EDX_PGE
     
    832833                                  | X86_CPUID_AMD_FEATURE_EDX_PAT
    833834                                  | X86_CPUID_AMD_FEATURE_EDX_PSE36
    834                                   //| X86_CPUID_AMD_FEATURE_EDX_NX     - not virtualized, requires PAE.
     835                                  //| X86_CPUID_EXT_FEATURE_EDX_NX     - not virtualized, requires PAE.
    835836                                  //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
    836837                                  | X86_CPUID_AMD_FEATURE_EDX_MMX
    837838                                  | X86_CPUID_AMD_FEATURE_EDX_FXSR
    838839                                  | X86_CPUID_AMD_FEATURE_EDX_FFXSR
    839                                   //| X86_CPUID_AMD_FEATURE_EDX_PAGE1GB
    840                                   //| X86_CPUID_AMD_FEATURE_EDX_RDTSCP - AMD only; turned on when necessary
    841                                   //| X86_CPUID_AMD_FEATURE_EDX_LONG_MODE - turned on when necessary
     840                                  //| X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
     841                                  | X86_CPUID_EXT_FEATURE_EDX_RDTSCP
     842                                  //| X86_CPUID_EXT_FEATURE_EDX_LONG_MODE - turned on when necessary
    842843                                  | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
    843844                                  | X86_CPUID_AMD_FEATURE_EDX_3DNOW
    844845                                  | 0;
    845846    pCPUM->aGuestCpuIdExt[1].ecx &= 0
    846                                   //| X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF
     847                                  //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
    847848                                  //| X86_CPUID_AMD_FEATURE_ECX_CMPL
    848849                                  //| X86_CPUID_AMD_FEATURE_ECX_SVM    - not virtualized.
     
    866867        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW_EX,   X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
    867868        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, FFXSR,      X86_CPUID_AMD_FEATURE_EDX_FFXSR);
    868         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP,     X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
    869         PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF,  X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF);
     869        PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP,     X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
     870        PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF,  X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
    870871        PORTABLE_DISABLE_FEATURE_BIT(3, Ext[1].ecx, CMOV,       X86_CPUID_AMD_FEATURE_EDX_CMOV);
    871872
     
    886887                                                 )));
    887888        Assert(!(pCPUM->aGuestCpuIdExt[1].edx & (  RT_BIT(10)
    888                                                  | X86_CPUID_AMD_FEATURE_EDX_SEP
     889                                                 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
    889890                                                 | RT_BIT(18)
    890891                                                 | RT_BIT(19)
    891892                                                 | RT_BIT(21)
    892893                                                 | X86_CPUID_AMD_FEATURE_EDX_AXMMX
    893                                                  | X86_CPUID_AMD_FEATURE_EDX_PAGE1GB
     894                                                 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
    894895                                                 | RT_BIT(28)
    895896                                                 )));
     
    929930        pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0;
    930931
    931         /* 0x800000001: AMD only; shared feature bits are set dynamically. */
     932        /* 0x800000001: shared feature bits are set dynamically. */
    932933        memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1]));
    933934
     
    12201221    rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false);                 AssertRCReturn(rc, rc);
    12211222    if (fEnable)
    1222         CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
     1223        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    12231224
    12241225    /*
     
    19271928
    19281929            /* CPUID(0x80000001).ecx */
    1929             CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF);
     1930            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
    19301931            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);
    19311932            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);
     
    19721973            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_APIC);
    19731974            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(10) /*reserved*/);
    1974             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_SEP);
     1975            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SEP);
    19751976            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MTRR);
    19761977            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PGE);
     
    19811982            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(18) /*reserved*/);
    19821983            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(19) /*reserved*/);
    1983             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_NX);
     1984            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
    19841985            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(21) /*reserved*/);
    19851986            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
     
    19871988            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FXSR);
    19881989            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
    1989             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAGE1GB);
    1990             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
     1990            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
     1991            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
    19911992            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(28) /*reserved*/);
    1992             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
     1993            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
    19931994            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
    19941995            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
     
    20892090
    20902091        /* CPUID(0x80000001).ecx */
    2091         CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF);   // -> EMU
     2092        CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);   // -> EMU
    20922093        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);    // -> EMU
    20932094        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);     // -> EMU
     
    21342135        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_APIC,  X86_CPUID_FEATURE_EDX_APIC);
    21352136        CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(10) /*reserved*/);
    2136         CPUID_GST_FEATURE_IGN(    Ext, edx, X86_CPUID_AMD_FEATURE_EDX_SEP);                                  // Intel: long mode only.
     2137        CPUID_GST_FEATURE_IGN(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL);                              // On Intel: long mode only.
    21372138        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_MTRR,  X86_CPUID_FEATURE_EDX_MTRR);
    21382139        CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_PGE,   X86_CPUID_FEATURE_EDX_PGE);
     
    21432144        CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(18) /*reserved*/);
    21442145        CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(19) /*reserved*/);
    2145         CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_AMD_FEATURE_EDX_NX);
     2146        CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
    21462147        CPUID_GST_FEATURE_WRN(    Ext, edx, RT_BIT_32(21) /*reserved*/);
    21472148        CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
     
    21492150        CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_FXSR,  X86_CPUID_FEATURE_EDX_FXSR);    // -> EMU
    21502151        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
    2151         CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAGE1GB);
    2152         CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_RDTSCP);
     2152        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
     2153        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
    21532154        CPUID_GST_FEATURE_IGN(    Ext, edx, RT_BIT_32(28) /*reserved*/);
    2154         CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
     2155        CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
    21552156        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
    21562157        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
     
    34363437            pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology             = %d (%d)\n",  !!(uEdxGst & RT_BIT(23)),  !!(uEdxHst & RT_BIT(23)));
    34373438            pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n",  !!(uEdxGst & RT_BIT(24)),  !!(uEdxHst & RT_BIT(24)));
    3438             pHlp->pfnPrintf(pHlp, "25 - AMD fast FXSAVE and FXRSTOR Instr.= %d (%d)\n",  !!(uEdxGst & RT_BIT(25)),  !!(uEdxHst & RT_BIT(25)));
    3439             pHlp->pfnPrintf(pHlp, "26 - 1 GB large page support           = %d (%d)\n",  !!(uEdxGst & RT_BIT(26)),  !!(uEdxHst & RT_BIT(26)));
    3440             pHlp->pfnPrintf(pHlp, "27 - RDTSCP instruction                = %d (%d)\n",  !!(uEdxGst & RT_BIT(27)),  !!(uEdxHst & RT_BIT(27)));
     3439            pHlp->pfnPrintf(pHlp, "AMD fast FXSAVE and FXRSTOR Instr.     = %d (%d)\n",  !!(uEdxGst & RT_BIT(25)),  !!(uEdxHst & RT_BIT(25)));
     3440            pHlp->pfnPrintf(pHlp, "1 GB large page support                = %d (%d)\n",  !!(uEdxGst & RT_BIT(26)),  !!(uEdxHst & RT_BIT(26)));
     3441            pHlp->pfnPrintf(pHlp, "RDTSCP instruction                     = %d (%d)\n",  !!(uEdxGst & RT_BIT(27)),  !!(uEdxHst & RT_BIT(27)));
    34413442            pHlp->pfnPrintf(pHlp, "28 - Reserved                          = %d (%d)\n",  !!(uEdxGst & RT_BIT(28)),  !!(uEdxHst & RT_BIT(28)));
    3442             pHlp->pfnPrintf(pHlp, "29 - AMD Long Mode                     = %d (%d)\n",  !!(uEdxGst & RT_BIT(29)),  !!(uEdxHst & RT_BIT(29)));
    3443             pHlp->pfnPrintf(pHlp, "30 - AMD Extensions to 3DNow           = %d (%d)\n",  !!(uEdxGst & RT_BIT(30)),  !!(uEdxHst & RT_BIT(30)));
    3444             pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow                         = %d (%d)\n",  !!(uEdxGst & RT_BIT(31)),  !!(uEdxHst & RT_BIT(31)));
     3443            pHlp->pfnPrintf(pHlp, "AMD Long Mode / Intel 64 ISA           = %d (%d)\n",  !!(uEdxGst & RT_BIT(29)),  !!(uEdxHst & RT_BIT(29)));
     3444            pHlp->pfnPrintf(pHlp, "AMD Extensions to 3DNow!               = %d (%d)\n",  !!(uEdxGst & RT_BIT(30)),  !!(uEdxHst & RT_BIT(30)));
     3445            pHlp->pfnPrintf(pHlp, "AMD 3DNow!                             = %d (%d)\n",  !!(uEdxGst & RT_BIT(31)),  !!(uEdxHst & RT_BIT(31)));
    34453446
    34463447            uint32_t uEcxGst = Guest.ecx;
  • trunk/src/VBox/VMM/VMMR3/HWACCM.cpp

    r41965 r42024  
    110110    EXIT_REASON(VMX_EXIT_EPT_MISCONFIG      , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."),
    111111    EXIT_REASON(VMX_EXIT_INVEPT             , 50, "INVEPT. Guest software attempted to execute INVEPT."),
    112     EXIT_REASON_NIL(),
     112    EXIT_REASON(VMX_EXIT_RDTSCP             , 51, "Guest software attempted to execute RDTSCP."),
    113113    EXIT_REASON(VMX_EXIT_PREEMPTION_TIMER   , 52, "VMX-preemption timer expired. The preemption timer counted down to zero."),
    114114    EXIT_REASON(VMX_EXIT_INVVPID            , 53, "INVVPID. Guest software attempted to execute INVVPID."),
     
    514514        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid,              "/HWACCM/CPU%d/Exit/Instr/Cpuid");
    515515        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc,              "/HWACCM/CPU%d/Exit/Instr/Rdtsc");
     516        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtscp,             "/HWACCM/CPU%d/Exit/Instr/Rdtscp");
    516517        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdpmc,              "/HWACCM/CPU%d/Exit/Instr/Rdpmc");
    517518        HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdmsr,              "/HWACCM/CPU%d/Exit/Instr/Rdmsr");
     
    928929                if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
    929930                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
    930                 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT)
    931                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT\n"));
     931                if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     932                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP\n"));
    932933                if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
    933934                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
     
    946947                if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
    947948                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
    948                 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT)
    949                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP_EXIT *must* be set\n"));
     949                if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     950                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP *must* be set\n"));
    950951                if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
    951952                    LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
     
    10901091                LogRel(("HWACCM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc)));
    10911092            else
    1092                 LogRel(("HWACCM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc), pVM->hwaccm.s.vmx.cPreemptTimerShift));
     1093            {
     1094                LogRel(("HWACCM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n",
     1095                        MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc), pVM->hwaccm.s.vmx.cPreemptTimerShift));
     1096            }
    10931097            LogRel(("HWACCM:    MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
    10941098            LogRel(("HWACCM:    MSR_IA32_VMX_MISC_CR3_TARGET      %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
     
    11881192                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);            /* 64 bits only on Intel CPUs */
    11891193                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
    1190                     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
     1194                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    11911195                }
    11921196                else
     
    11951199                if (    CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)
    11961200                    &&  (pVM->hwaccm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
    1197                     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
     1201                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    11981202
    11991203                LogRel((pVM->hwaccm.s.fAllow64BitGuests
     
    12481252                        ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
    12491253                        if (    u32Eax < 0x80000001
    1250                             ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
     1254                            ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
    12511255                        {
    12521256                            pVM->hwaccm.s.fTRPPatchingAllowed = false;
     
    13771381                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
    13781382                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
    1379                 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
    13801383#ifdef VBOX_ENABLE_64_BITS_GUESTS
    13811384                if (pVM->hwaccm.s.fAllow64BitGuests)
     
    13831386                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    13841387                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
    1385                     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
     1388                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    13861389                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
    13871390                }
     
    13891392                /* Turn on NXE if PAE has been enabled. */
    13901393                if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
    1391                     CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
     1394                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    13921395#endif
    13931396
  • trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp

    r41976 r42024  
    536536            {
    537537                uint32_t u32OrMask = MSR_K6_EFER_LME | MSR_K6_EFER_SCE;
    538                 /** note: we don't care if cpuid 0x8000001 isn't supported as that implies long mode isn't either, so this switcher would never be used. */
    539                 if (!!(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
     538                /*
     539                 * We don't care if cpuid 0x8000001 isn't supported as that implies
 540                 * long mode isn't supported either, so this switcher would never be used.
     541                 */
     542                if (!!(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
    540543                    u32OrMask |= MSR_K6_EFER_NXE;
    541544
  • trunk/src/VBox/VMM/include/HWACCMInternal.h

    r41783 r42024  
    792792    STAMCOUNTER             StatExitCpuid;
    793793    STAMCOUNTER             StatExitRdtsc;
     794    STAMCOUNTER             StatExitRdtscp;
    794795    STAMCOUNTER             StatExitRdpmc;
    795796    STAMCOUNTER             StatExitCli;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette