- Timestamp: Dec 5, 2014 4:10:20 PM (10 years ago)
- Location: trunk
- Files: 4 edited
trunk/Config.kmk
r53424 → r53467

      # Enables the first step using IEM (the interpreter).
      VBOX_WITH_FIRST_IEM_STEP = 1
    - # Enables the new MSR code.
    - VBOX_WITH_NEW_MSR_CODE = 1
      ## @}
trunk/src/VBox/VMM/Makefile.kmk
r51643 → r53467

      ifdef IEM_VERIFICATION_MODE
       VMM_COMMON_DEFS += IEM_VERIFICATION_MODE IEM_VERIFICATION_MODE_FULL IEM_VERIFICATION_MODE_FULL_HM
      endif
    -
    - ifdef VBOX_WITH_NEW_MSR_CODE
    -  VMM_COMMON_DEFS += VBOX_WITH_NEW_MSR_CODE
    - endif
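These two hunks retire the VBOX_WITH_NEW_MSR_CODE build switch: Config.kmk stops setting it and the VMM makefile stops forwarding it via VMM_COMMON_DEFS, so the C sources in the next two files can drop their preprocessor guards. A minimal sketch of the general pattern being removed; only the VBOX_WITH_NEW_MSR_CODE and VMM_COMMON_DEFS names come from this changeset, everything else is illustrative:

    /* Pattern being retired: a kBuild option (Config.kmk) is forwarded as a
     * preprocessor define (VMM_COMMON_DEFS += VBOX_WITH_NEW_MSR_CODE), and the
     * sources pick one of two implementations at compile time. */
    #include <stdio.h>

    #ifdef VBOX_WITH_NEW_MSR_CODE
    static const char *msrImplementation(void) { return "new table-driven MSR code"; }
    #else
    static const char *msrImplementation(void) { return "legacy switch-based MSR code"; }
    #endif

    int main(void)
    {
        /* Once the option is made unconditional, the #else branch and the guard
         * itself can be deleted - which is what the hunks below do. */
        printf("Compiled with: %s\n", msrImplementation());
        return 0;
    }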
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r53466 → r53467

      }

    - #ifdef VBOX_WITH_NEW_MSR_CODE

      /**
      … …
          return rcStrict;
      }
    -
    - #endif /* VBOX_WITH_NEW_MSR_CODE */
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r52770 → r53467

      pVCpu->cpum.s.Guest.msrEFER = val;
      }

The remainder of the hunk deletes the entire legacy, switch-based MSR block that was guarded by
the preprocessor conditional (old lines 872-1596), from

    - #ifndef VBOX_WITH_NEW_MSR_CODE

down to

    - #endif /* !VBOX_WITH_NEW_MSR_CODE */

Three functions go away, superseded by the table-driven code in CPUMAllMsrs.cpp:

    - static int cpumQueryGuestMsrInt(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)

  The worker for CPUMQueryGuestMsr(). It returns VERR_CPUM_RAISE_GP_0 (with *puValue = 0) if CPUID
  does not advertise X86_CPUID_FEATURE_EDX_MSR, then switches on idMsr: MSR_IA32_TSC via
  TMCpuTickGet(), MSR_IA32_APICBASE (checking PDMHasApic()), MSR_IA32_CR_PAT, the SYSENTER MSRs,
  the MTRR capability/default-type/fixed/variable-range MSRs, MSR_K6_EFER, STAR/LSTAR/CSTAR/SF_MASK,
  the FS/GS bases, KERNEL_GS_BASE, TSC_AUX, synthesized values for PERF_STATUS, FSB_CLOCK_STS,
  PLATFORM_INFO, THERM_STATUS and MISC_ENABLE, groups of Intel-only and AMD-only MSRs that raise
  #GP(0) when the guest CPU vendor does not match, the X2APIC range forwarded to PDMApicReadMSR(),
  and VERR_CPUM_RAISE_GP_0 for everything else.

    - VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)

  A thin wrapper that calls cpumQueryGuestMsrInt() and logs the result. Its doc comment states that
  the caller is responsible for checking privilege if the call is the result of a RDMSR instruction,
  and that on VERR_CPUM_RAISE_GP_0 the caller is expected to take the appropriate action.

    - VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)

  The write counterpart (same privilege note for WRMSR; everyone changing MSR values, including the
  recompiler, is supposed to go through this method). Its switch covers the same MSR groups and adds
  validation: MSR_IA32_MTRR_DEF_TYPE rejects reserved encodings, MSR_K6_EFER only accepts the
  NXE/LME/SCE/FFXSR bits advertised by CPUID, raises #GP(0) when LME is toggled while paging is
  enabled, and flushes the TLB (HMFlushTLB, PGMNotifyNxeChanged) when NXE/LME/LMA change;
  MSR_PKG_CST_CONFIG_CONTROL honours its write-protect bit and a validity mask; the X2APIC range
  goes to PDMApicWriteMSR(); and unknown MSRs are currently logged and ignored rather than
  raising #GP(0).
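The doc comments on the removed CPUMQueryGuestMsr()/CPUMSetGuestMsr() spell out the caller contract: the caller performs the privilege check for RDMSR/WRMSR, and a VERR_CPUM_RAISE_GP_0 return means the caller must raise #GP(0) itself (with *puValue zeroed on the query path). A minimal, hypothetical sketch of an RDMSR emulation step built on that contract; everything except the CPUMQueryGuestMsr() name and the two status-code names is illustrative, and the stub below merely stands in for the real CPUM implementation:

    #include <stdint.h>
    #include <stdio.h>

    /* Status-code names from the contract above; numeric values are illustrative. */
    #define VINF_SUCCESS             0
    #define VERR_CPUM_RAISE_GP_0   (-1)

    /* Hypothetical, much reduced guest CPU context for the sketch. */
    typedef struct VMCPU { unsigned uCpl; uint64_t uTsc; } VMCPU;

    /* Stand-in stub for the real CPUM code: only "knows" one MSR. */
    static int CPUMQueryGuestMsr(VMCPU *pVCpu, uint32_t idMsr, uint64_t *puValue)
    {
        if (idMsr == 0x10 /* IA32_TSC */) { *puValue = pVCpu->uTsc; return VINF_SUCCESS; }
        *puValue = 0;                       /* contract: zero the output on failure */
        return VERR_CPUM_RAISE_GP_0;        /* caller raises #GP(0) */
    }

    /* Sketch of an RDMSR emulation step honouring the documented contract. */
    static void emulateRdMsr(VMCPU *pVCpu, uint32_t idMsr)
    {
        uint64_t uValue;
        if (pVCpu->uCpl != 0)                            /* caller does the privilege check */
        {
            printf("RDMSR %#x: #GP(0) (CPL != 0)\n", idMsr);
            return;
        }
        if (CPUMQueryGuestMsr(pVCpu, idMsr, &uValue) == VINF_SUCCESS)
            printf("RDMSR %#x -> EDX:EAX = %#llx\n", idMsr, (unsigned long long)uValue);
        else
            printf("RDMSR %#x: #GP(0) (unknown/invalid MSR)\n", idMsr);
    }

    int main(void)
    {
        VMCPU VCpu = { 0 /* ring 0 */, UINT64_C(0x123456789) };
        emulateRdMsr(&VCpu, 0x10);          /* known MSR: prints the value */
        emulateRdMsr(&VCpu, 0xdeadbeef);    /* unknown MSR: #GP(0) per the contract */
        return 0;
    }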