Changeset 42024 in vbox for trunk/src/VBox/VMM
- Timestamp:
- Jul 5, 2012 12:10:53 PM (13 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
-
- 11 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r41976 r42024 1042 1042 1043 1043 /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */ 1044 if (fExtFeatures & X86_CPUID_ AMD_FEATURE_EDX_NX)1044 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) 1045 1045 fMask |= MSR_K6_EFER_NXE; 1046 if (fExtFeatures & X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE)1046 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) 1047 1047 fMask |= MSR_K6_EFER_LME; 1048 if (fExtFeatures & X86_CPUID_ AMD_FEATURE_EDX_SEP)1048 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL) 1049 1049 fMask |= MSR_K6_EFER_SCE; 1050 1050 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR) … … 1584 1584 { 1585 1585 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 1586 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_ AMD_FEATURE_EDX_SEP))1586 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)) 1587 1587 { 1588 1588 #if HC_ARCH_BITS == 32 1589 /* X86_CPUID_ AMD_FEATURE_EDX_SEPnot set it seems in 32 bits mode.1589 /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL not set it seems in 32 bits mode. 1590 1590 * Even when the cpu is capable of doing so in 64 bits mode. 1591 1591 */ 1592 1592 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 1593 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE)1594 || !(ASMCpuId_EDX(1) & X86_CPUID_ FEATURE_EDX_SEP))1593 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) 1594 || !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)) 1595 1595 #endif 1596 1596 { … … 1600 1600 } 1601 1601 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. 
*/ 1602 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_ AMD_FEATURE_EDX_SEP;1602 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL; 1603 1603 LogRel(("CPUMSetGuestCpuIdFeature: Enabled syscall/ret\n")); 1604 1604 break; … … 1633 1633 { 1634 1634 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 1635 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE))1635 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)) 1636 1636 { 1637 1637 LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n")); … … 1640 1640 1641 1641 /* Valid for both Intel and AMD. */ 1642 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE;1642 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; 1643 1643 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n")); 1644 1644 break; … … 1646 1646 1647 1647 /* 1648 * Set the NX Ebit in the extended feature mask.1648 * Set the NX/XD bit in the extended feature mask. 1649 1649 * Assumes the caller knows what it's doing! (host must support these) 1650 1650 */ 1651 case CPUMCPUIDFEATURE_NX E:1651 case CPUMCPUIDFEATURE_NX: 1652 1652 { 1653 1653 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 1654 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_ AMD_FEATURE_EDX_NX))1654 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX)) 1655 1655 { 1656 LogRel(("WARNING: Can't turn on NX Ewhen the host doesn't support it!!\n"));1656 LogRel(("WARNING: Can't turn on NX/XD when the host doesn't support it!!\n")); 1657 1657 return; 1658 1658 } 1659 1659 1660 1660 /* Valid for both Intel and AMD. 
*/ 1661 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX; 1662 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n")); 1663 break; 1664 } 1665 1661 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX; 1662 LogRel(("CPUMSetGuestCpuIdFeature: Enabled NX\n")); 1663 break; 1664 } 1665 1666 /* 1667 * Set the LAHF/SAHF support in 64-bit mode. 1668 * Assumes the caller knows what it's doing! (host must support this) 1669 */ 1666 1670 case CPUMCPUIDFEATURE_LAHF: 1667 1671 { 1668 1672 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 1669 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_ AMD_FEATURE_ECX_LAHF_SAHF))1673 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF)) 1670 1674 { 1671 1675 LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n")); … … 1673 1677 } 1674 1678 1675 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF; 1679 /* Valid for both Intel and AMD. */ 1680 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; 1676 1681 LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n")); 1677 1682 break; … … 1689 1694 } 1690 1695 1696 /* 1697 * Set the RDTSCP support bit. 1698 * Assumes the caller knows what it's doing! (host must support this) 1699 */ 1691 1700 case CPUMCPUIDFEATURE_RDTSCP: 1692 1701 { 1693 1702 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 1694 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_ AMD_FEATURE_EDX_RDTSCP)1703 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP) 1695 1704 || pVM->cpum.s.u8PortableCpuIdLevel > 0) 1696 1705 { … … 1700 1709 } 1701 1710 1702 /* Valid for AMD only (for now). */1703 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_ AMD_FEATURE_EDX_RDTSCP;1711 /* Valid for both Intel and AMD. 
*/ 1712 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP; 1704 1713 LogRel(("CPUMSetGuestCpuIdFeature: Enabled RDTSCP.\n")); 1705 1714 break; … … 1745 1754 } 1746 1755 1747 case CPUMCPUIDFEATURE_NX E:1756 case CPUMCPUIDFEATURE_NX: 1748 1757 { 1749 1758 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 1750 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_ AMD_FEATURE_EDX_NX);1759 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX); 1751 1760 } 1752 1761 … … 1754 1763 { 1755 1764 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 1756 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_ AMD_FEATURE_EDX_RDTSCP);1765 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 1757 1766 break; 1758 1767 } … … 1761 1770 { 1762 1771 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 1763 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE);1772 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE); 1764 1773 break; 1765 1774 } … … 1829 1838 { 1830 1839 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 1831 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE;1840 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; 1832 1841 break; 1833 1842 } … … 1836 1845 { 1837 1846 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 1838 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_ AMD_FEATURE_ECX_LAHF_SAHF;1847 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; 1839 1848 break; 1840 1849 } -
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
r40266 r42024 982 982 { 983 983 /* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */ 984 if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW,984 if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_EXT_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW, 985 985 X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF)) 986 986 { … … 8678 8678 IEMOP_HLP_NO_LOCK_PREFIX(); 8679 8679 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT 8680 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_ AMD_FEATURE_ECX_LAHF_SAHF))8680 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF)) 8681 8681 return IEMOP_RAISE_INVALID_OPCODE(); 8682 8682 IEM_MC_BEGIN(0, 2); … … 8702 8702 IEMOP_HLP_NO_LOCK_PREFIX(); 8703 8703 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT 8704 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_ AMD_FEATURE_ECX_LAHF_SAHF))8704 && !IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF)) 8705 8705 return IEMOP_RAISE_INVALID_OPCODE(); 8706 8706 IEM_MC_BEGIN(0, 1); -
trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp
r41965 r42024 215 215 break; 216 216 case 0x11: case 0x08: 217 AssertMsg(uErrorCode == 0, 217 AssertMsg(uErrorCode == 0, ("Invalid uErrorCode=%#x u8TrapNo=%d\n", uErrorCode, pVCpu->trpm.s.uActiveVector)); 218 218 break; 219 219 default: -
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r41965 r42024 186 186 { 187 187 uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001); 188 if (fExtFeaturesEDX & X86_CPUID_ AMD_FEATURE_EDX_SEP)188 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_SYSCALL) 189 189 { 190 190 #ifdef RT_ARCH_X86 191 191 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 192 if (fExtFeaturesEDX & X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE)192 if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) 193 193 # else 194 194 if (!ASMIsIntelCpu()) -
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r41965 r42024 2219 2219 { 2220 2220 Log2(("SVM: Rdtscp\n")); 2221 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtsc );2221 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtscp); 2222 2222 rc = EMInterpretRdtscp(pVM, pVCpu, pCtx); 2223 2223 if (rc == VINF_SUCCESS) -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r41965 r42024 568 568 if (pVM->hwaccm.s.vmx.fUnrestrictedGuest) 569 569 val |= VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE; 570 571 if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 572 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; 570 573 571 574 /* Mask away the bits that the CPU doesn't support */ … … 1316 1319 * Check if EFER MSR present. 1317 1320 */ 1318 if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_ AMD_FEATURE_EDX_NX|X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))1319 { 1320 if (ASMCpuId_EDX(0x80000001) & X86_CPUID_ AMD_FEATURE_EDX_SEP)1321 if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)) 1322 { 1323 if (ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL) 1321 1324 { 1322 1325 pMsr->u32IndexMSR = MSR_K6_STAR; 1323 1326 pMsr->u32Reserved = 0; 1324 pMsr->u64Value = ASMRdMsr(MSR_K6_STAR); 1327 pMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */ 1325 1328 pMsr++; idxMsr++; 1326 1329 } … … 2098 2101 CPUMGetGuestCpuId(pVCpu, 0x80000001, &ulTemp, &ulTemp, &ulTemp, &ulEdx); 2099 2102 /* EFER MSR present? */ 2100 if (ulEdx & (X86_CPUID_ AMD_FEATURE_EDX_NX|X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))2103 if (ulEdx & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)) 2101 2104 { 2102 2105 pMsr->u32IndexMSR = MSR_K6_EFER; … … 2105 2108 /* VT-x will complain if only MSR_K6_EFER_LME is set. 
*/ 2106 2109 if (!CPUMIsGuestInLongModeEx(pCtx)) 2107 pMsr->u64Value &= ~(MSR_K6_EFER_LMA |MSR_K6_EFER_LME);2110 pMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME); 2108 2111 pMsr++; idxMsr++; 2109 2112 2110 if (ulEdx & X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE)2113 if (ulEdx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) 2111 2114 { 2112 2115 pMsr->u32IndexMSR = MSR_K8_LSTAR; … … 2161 2164 if (u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu)) 2162 2165 { 2163 /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET */2166 /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */ 2164 2167 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->hwaccm.s.vmx.u64TSCOffset); 2165 2168 AssertRC(rc); … … 2172 2175 else 2173 2176 { 2174 /* Fall back to rdtsc emulation as we would otherwise pass decreasing tsc values to the guest. */2177 /* Fall back to rdtsc, rdtscp emulation as we would otherwise pass decreasing tsc values to the guest. */ 2175 2178 LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, 2176 2179 pVCpu->hwaccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset, … … 2332 2335 switch (pMsr->u32IndexMSR) 2333 2336 { 2334 case MSR_K8_LSTAR:2335 pCtx->msrLSTAR = pMsr->u64Value;2336 break;2337 case MSR_K6_STAR:2338 pCtx->msrSTAR = pMsr->u64Value;2339 break;2340 case MSR_K8_SF_MASK:2341 pCtx->msrSFMASK = pMsr->u64Value;2342 break;2343 case MSR_K8_KERNEL_GS_BASE:2344 pCtx->msrKERNELGSBASE = pMsr->u64Value;2345 break;2346 case MSR_K6_EFER:2347 /* EFER can't be changed without causing a VM-exit. 
*/2348 /* Assert(pCtx->msrEFER == pMsr->u64Value); */2349 break;2350 default:2351 AssertFailed();2352 return VERR_HM_UNEXPECTED_LD_ST_MSR;2337 case MSR_K8_LSTAR: 2338 pCtx->msrLSTAR = pMsr->u64Value; 2339 break; 2340 case MSR_K6_STAR: 2341 pCtx->msrSTAR = pMsr->u64Value; 2342 break; 2343 case MSR_K8_SF_MASK: 2344 pCtx->msrSFMASK = pMsr->u64Value; 2345 break; 2346 case MSR_K8_KERNEL_GS_BASE: 2347 pCtx->msrKERNELGSBASE = pMsr->u64Value; 2348 break; 2349 case MSR_K6_EFER: 2350 /* EFER can't be changed without causing a VM-exit. */ 2351 /* Assert(pCtx->msrEFER == pMsr->u64Value); */ 2352 break; 2353 default: 2354 AssertFailed(); 2355 return VERR_HM_UNEXPECTED_LD_ST_MSR; 2353 2356 } 2354 2357 } … … 2453 2456 else 2454 2457 { 2455 AssertMsg(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID, ("hwaccm uCurrentASID=%lu cpu uCurrentASID=%lu\n", 2456 pVCpu->hwaccm.s.uCurrentASID, pCpu->uCurrentASID)); 2458 AssertMsg(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID, 2459 ("hwaccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n", 2460 pVCpu->hwaccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes, 2461 pCpu->uCurrentASID, pCpu->cTLBFlushes)); 2457 2462 2458 2463 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should … … 4046 4051 /* Update EIP and continue execution. */ 4047 4052 Assert(cbInstr == 2); 4053 pCtx->rip += cbInstr; 4054 goto ResumeExecution; 4055 } 4056 rc = VINF_EM_RAW_EMULATE_INSTR; 4057 break; 4058 } 4059 4060 case VMX_EXIT_RDTSCP: /* 51 Guest software attempted to execute RDTSCP. */ 4061 { 4062 Log2(("VMX: Rdtscp\n")); 4063 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtscp); 4064 rc = EMInterpretRdtscp(pVM, pVCpu, pCtx); 4065 if (rc == VINF_SUCCESS) 4066 { 4067 /* Update EIP and continue execution. */ 4068 Assert(cbInstr == 3); 4048 4069 pCtx->rip += cbInstr; 4049 4070 goto ResumeExecution; … … 4626 4647 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. 
*/ 4627 4648 case VMX_EXIT_RDPMC: /* 15 Guest software attempted to execute RDPMC. */ 4649 case VMX_EXIT_RDTSCP: /* 51 Guest software attempted to execute RDTSCP. */ 4628 4650 /* already handled above */ 4629 4651 AssertMsg( rc == VINF_PGM_CHANGE_MODE -
trunk/src/VBox/VMM/VMMR0/TRPMR0.cpp
r41965 r42024 62 62 * Check if we're in long mode or not. 63 63 */ 64 if ( (ASMCpuId_EDX(0x80000001) & X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE)64 if ( (ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) 65 65 && (ASMRdMsr(MSR_K6_EFER) & MSR_K6_EFER_LMA)) 66 66 { -
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r41965 r42024 684 684 */ 685 685 /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit} 686 * Overrides the host CPUID leaf values used for calculating the guest CPUID 687 * leaves. This can be used to preserve the CPUID values when moving a VM 688 * to a different machine. Another use is restricting (or extending) the 689 * feature set exposed to the guest. */ 686 * Loads the host CPUID leaves to the guest copy. Overrides, if any, the host 687 * CPUID leaf values used for calculating the guest CPUID leaves. This can be 688 * used to preserve the CPUID values when moving a VM to a different machine. 689 * Another use is restricting (or extending) the feature set exposed to the 690 * guest. */ 690 691 PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID"); 691 692 rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pHostOverrideCfg); … … 825 826 //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present. 826 827 /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */ 827 //| X86_CPUID_ AMD_FEATURE_EDX_SEP828 //| X86_CPUID_EXT_FEATURE_EDX_SEP 828 829 | X86_CPUID_AMD_FEATURE_EDX_MTRR 829 830 | X86_CPUID_AMD_FEATURE_EDX_PGE … … 832 833 | X86_CPUID_AMD_FEATURE_EDX_PAT 833 834 | X86_CPUID_AMD_FEATURE_EDX_PSE36 834 //| X86_CPUID_ AMD_FEATURE_EDX_NX - not virtualized, requires PAE.835 //| X86_CPUID_EXT_FEATURE_EDX_NX - not virtualized, requires PAE. 
835 836 //| X86_CPUID_AMD_FEATURE_EDX_AXMMX 836 837 | X86_CPUID_AMD_FEATURE_EDX_MMX 837 838 | X86_CPUID_AMD_FEATURE_EDX_FXSR 838 839 | X86_CPUID_AMD_FEATURE_EDX_FFXSR 839 //| X86_CPUID_ AMD_FEATURE_EDX_PAGE1GB840 //| X86_CPUID_AMD_FEATURE_EDX_RDTSCP - AMD only; turned on when necessary841 //| X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE - turned on when necessary840 //| X86_CPUID_EXT_FEATURE_EDX_PAGE1GB 841 | X86_CPUID_EXT_FEATURE_EDX_RDTSCP 842 //| X86_CPUID_EXT_FEATURE_EDX_LONG_MODE - turned on when necessary 842 843 | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX 843 844 | X86_CPUID_AMD_FEATURE_EDX_3DNOW 844 845 | 0; 845 846 pCPUM->aGuestCpuIdExt[1].ecx &= 0 846 //| X86_CPUID_ AMD_FEATURE_ECX_LAHF_SAHF847 //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF 847 848 //| X86_CPUID_AMD_FEATURE_ECX_CMPL 848 849 //| X86_CPUID_AMD_FEATURE_ECX_SVM - not virtualized. … … 866 867 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 867 868 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 868 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP, X86_CPUID_ AMD_FEATURE_EDX_RDTSCP);869 PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF, X86_CPUID_ AMD_FEATURE_ECX_LAHF_SAHF);869 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 870 PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); 870 871 PORTABLE_DISABLE_FEATURE_BIT(3, Ext[1].ecx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV); 871 872 … … 886 887 ))); 887 888 Assert(!(pCPUM->aGuestCpuIdExt[1].edx & ( RT_BIT(10) 888 | X86_CPUID_ AMD_FEATURE_EDX_SEP889 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL 889 890 | RT_BIT(18) 890 891 | RT_BIT(19) 891 892 | RT_BIT(21) 892 893 | X86_CPUID_AMD_FEATURE_EDX_AXMMX 893 | X86_CPUID_ AMD_FEATURE_EDX_PAGE1GB894 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB 894 895 | RT_BIT(28) 895 896 ))); … … 929 930 pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0; 930 
931 931 /* 0x800000001: AMD only;shared feature bits are set dynamically. */932 /* 0x800000001: shared feature bits are set dynamically. */ 932 933 memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1])); 933 934 … … 1220 1221 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false); AssertRCReturn(rc, rc); 1221 1222 if (fEnable) 1222 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX E);1223 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1223 1224 1224 1225 /* … … 1927 1928 1928 1929 /* CPUID(0x80000001).ecx */ 1929 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_ AMD_FEATURE_ECX_LAHF_SAHF);1930 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); 1930 1931 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL); 1931 1932 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM); … … 1972 1973 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_APIC); 1973 1974 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(10) /*reserved*/); 1974 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_ AMD_FEATURE_EDX_SEP);1975 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SEP); 1975 1976 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MTRR); 1976 1977 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PGE); … … 1981 1982 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(18) /*reserved*/); 1982 1983 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(19) /*reserved*/); 1983 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_ AMD_FEATURE_EDX_NX);1984 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX); 1984 1985 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(21) /*reserved*/); 1985 1986 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX); … … 1987 1988 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FXSR); 1988 1989 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 1989 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_ AMD_FEATURE_EDX_PAGE1GB);1990 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_ 
AMD_FEATURE_EDX_RDTSCP);1990 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB); 1991 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 1991 1992 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(28) /*reserved*/); 1992 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE);1993 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE); 1993 1994 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 1994 1995 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW); … … 2089 2090 2090 2091 /* CPUID(0x80000001).ecx */ 2091 CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_ AMD_FEATURE_ECX_LAHF_SAHF); // -> EMU2092 CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); // -> EMU 2092 2093 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU 2093 2094 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU … … 2134 2135 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC); 2135 2136 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(10) /*reserved*/); 2136 CPUID_GST_FEATURE_IGN( Ext, edx, X86_CPUID_ AMD_FEATURE_EDX_SEP); //Intel: long mode only.2137 CPUID_GST_FEATURE_IGN( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL); // On Intel: long mode only. 
2137 2138 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR); 2138 2139 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE); … … 2143 2144 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(18) /*reserved*/); 2144 2145 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(19) /*reserved*/); 2145 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_ AMD_FEATURE_EDX_NX);2146 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX); 2146 2147 CPUID_GST_FEATURE_WRN( Ext, edx, RT_BIT_32(21) /*reserved*/); 2147 2148 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX); … … 2149 2150 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU 2150 2151 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 2151 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_ AMD_FEATURE_EDX_PAGE1GB);2152 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_ AMD_FEATURE_EDX_RDTSCP);2152 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB); 2153 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 2153 2154 CPUID_GST_FEATURE_IGN( Ext, edx, RT_BIT_32(28) /*reserved*/); 2154 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE);2155 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE); 2155 2156 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 2156 2157 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW); … … 3436 3437 pHlp->pfnPrintf(pHlp, "MMX - Intel MMX Technology = %d (%d)\n", !!(uEdxGst & RT_BIT(23)), !!(uEdxHst & RT_BIT(23))); 3437 3438 pHlp->pfnPrintf(pHlp, "FXSR - FXSAVE and FXRSTOR Instructions = %d (%d)\n", !!(uEdxGst & RT_BIT(24)), !!(uEdxHst & RT_BIT(24))); 3438 pHlp->pfnPrintf(pHlp, " 25 - AMD fast FXSAVE and FXRSTOR Instr.= %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25)));3439 pHlp->pfnPrintf(pHlp, " 26 - 1 GB large page support= %d 
(%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26)));3440 pHlp->pfnPrintf(pHlp, " 27 - RDTSCP instruction= %d (%d)\n", !!(uEdxGst & RT_BIT(27)), !!(uEdxHst & RT_BIT(27)));3439 pHlp->pfnPrintf(pHlp, "AMD fast FXSAVE and FXRSTOR Instr. = %d (%d)\n", !!(uEdxGst & RT_BIT(25)), !!(uEdxHst & RT_BIT(25))); 3440 pHlp->pfnPrintf(pHlp, "1 GB large page support = %d (%d)\n", !!(uEdxGst & RT_BIT(26)), !!(uEdxHst & RT_BIT(26))); 3441 pHlp->pfnPrintf(pHlp, "RDTSCP instruction = %d (%d)\n", !!(uEdxGst & RT_BIT(27)), !!(uEdxHst & RT_BIT(27))); 3441 3442 pHlp->pfnPrintf(pHlp, "28 - Reserved = %d (%d)\n", !!(uEdxGst & RT_BIT(28)), !!(uEdxHst & RT_BIT(28))); 3442 pHlp->pfnPrintf(pHlp, " 29 - AMD Long Mode= %d (%d)\n", !!(uEdxGst & RT_BIT(29)), !!(uEdxHst & RT_BIT(29)));3443 pHlp->pfnPrintf(pHlp, " 30 - AMD Extensions to 3DNow= %d (%d)\n", !!(uEdxGst & RT_BIT(30)), !!(uEdxHst & RT_BIT(30)));3444 pHlp->pfnPrintf(pHlp, " 31 - AMD 3DNow= %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31)));3443 pHlp->pfnPrintf(pHlp, "AMD Long Mode / Intel 64 ISA = %d (%d)\n", !!(uEdxGst & RT_BIT(29)), !!(uEdxHst & RT_BIT(29))); 3444 pHlp->pfnPrintf(pHlp, "AMD Extensions to 3DNow! = %d (%d)\n", !!(uEdxGst & RT_BIT(30)), !!(uEdxHst & RT_BIT(30))); 3445 pHlp->pfnPrintf(pHlp, "AMD 3DNow! = %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31))); 3445 3446 3446 3447 uint32_t uEcxGst = Guest.ecx; -
trunk/src/VBox/VMM/VMMR3/HWACCM.cpp
r41965 r42024 110 110 EXIT_REASON(VMX_EXIT_EPT_MISCONFIG , 49, "EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry."), 111 111 EXIT_REASON(VMX_EXIT_INVEPT , 50, "INVEPT. Guest software attempted to execute INVEPT."), 112 EXIT_REASON _NIL(),112 EXIT_REASON(VMX_EXIT_RDTSCP , 51, "Guest software attempted to execute RDTSCP."), 113 113 EXIT_REASON(VMX_EXIT_PREEMPTION_TIMER , 52, "VMX-preemption timer expired. The preemption timer counted down to zero."), 114 114 EXIT_REASON(VMX_EXIT_INVVPID , 53, "INVVPID. Guest software attempted to execute INVVPID."), … … 514 514 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid, "/HWACCM/CPU%d/Exit/Instr/Cpuid"); 515 515 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc, "/HWACCM/CPU%d/Exit/Instr/Rdtsc"); 516 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtscp, "/HWACCM/CPU%d/Exit/Instr/Rdtscp"); 516 517 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdpmc, "/HWACCM/CPU%d/Exit/Instr/Rdpmc"); 517 518 HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdmsr, "/HWACCM/CPU%d/Exit/Instr/Rdmsr"); … … 928 929 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT) 929 930 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n")); 930 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP _EXIT)931 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP _EXIT\n"));931 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 932 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP\n")); 932 933 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC) 933 934 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n")); … … 946 947 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT) 947 948 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n")); 948 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP _EXIT)949 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP _EXIT*must* be set\n"));949 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 950 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP *must* be 
set\n")); 950 951 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC) 951 952 LogRel(("HWACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n")); … … 1090 1091 LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc))); 1091 1092 else 1092 LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc), pVM->hwaccm.s.vmx.cPreemptTimerShift)); 1093 { 1094 LogRel(("HWACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n", 1095 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc), pVM->hwaccm.s.vmx.cPreemptTimerShift)); 1096 } 1093 1097 LogRel(("HWACCM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc))); 1094 1098 LogRel(("HWACCM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc))); … … 1188 1192 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */ 1189 1193 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF); 1190 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX E);1194 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1191 1195 } 1192 1196 else … … 1195 1199 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE) 1196 1200 && (pVM->hwaccm.s.vmx.hostEFER & MSR_K6_EFER_NXE)) 1197 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX E);1201 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1198 1202 1199 1203 LogRel((pVM->hwaccm.s.fAllow64BitGuests … … 1248 1252 ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy); 1249 1253 if ( u32Eax < 0x80000001 1250 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_ AMD_FEATURE_EDX_LONG_MODE))1254 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)) 1251 1255 { 1252 1256 pVM->hwaccm.s.fTRPPatchingAllowed = false; … … 1377 1381 CPUMSetGuestCpuIdFeature(pVM, 
CPUMCPUIDFEATURE_SEP); 1378 1382 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); 1379 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);1380 1383 #ifdef VBOX_ENABLE_64_BITS_GUESTS 1381 1384 if (pVM->hwaccm.s.fAllow64BitGuests) … … 1383 1386 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE); 1384 1387 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE); 1385 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX E);1388 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1386 1389 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF); 1387 1390 } … … 1389 1392 /* Turn on NXE if PAE has been enabled. */ 1390 1393 if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)) 1391 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX E);1394 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1392 1395 #endif 1393 1396 -
trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp
r41976 r42024 536 536 { 537 537 { uint32_t u32OrMask = MSR_K6_EFER_LME | MSR_K6_EFER_SCE; 538 /** note: we don't care if cpuid 0x80000001 isn't supported as that implies long mode isn't either, so this switcher would never be used. */ 539 if (!!(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX)) 538 /* 539 * We don't care if cpuid 0x80000001 isn't supported as that implies 540 * long mode isn't supported either, so this switcher would never be used. 541 */ 542 if (!!(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX)) 540 543 u32OrMask |= MSR_K6_EFER_NXE; 541 544
trunk/src/VBox/VMM/include/HWACCMInternal.h
r41783 r42024 792 792 STAMCOUNTER StatExitCpuid; 793 793 STAMCOUNTER StatExitRdtsc; 794 STAMCOUNTER StatExitRdtscp; 794 795 STAMCOUNTER StatExitRdpmc; 795 796 STAMCOUNTER StatExitCli;
Note:
See TracChangeset
for help on using the changeset viewer.