Changeset 49520 in vbox for trunk/src/VBox
- Timestamp: Nov 18, 2013 9:30:29 AM
- Location: trunk/src/VBox/VMM
- Files: 3 edited
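Much of this revision reworks how the VMX MSR bitmap and the auto-load/store MSR areas are managed. For readers of the diff below, here is a minimal standalone sketch of the 4 KB MSR-bitmap layout walked by the new hmR0VmxGetMsrPermission()/hmR0VmxSetMsrPermission() pair; the helper name msrBitmapIsIntercepted and its plain bool return are inventions of this example, while the offsets (read bitmaps for the low/high MSR ranges at 0x000/0x400, write bitmaps 0x800 bytes later) follow the layout the changeset relies on.

/*
 * Sketch only, not VirtualBox code: returns true if the given MSR access
 * (read or write) would cause a VM-exit according to the 4 KB MSR bitmap.
 */
#include <stdbool.h>
#include <stdint.h>

static bool msrBitmapIsIntercepted(const uint8_t *pbBitmap /* 4 KB page */, uint32_t uMsr, bool fWrite)
{
    uint32_t offBase;
    uint32_t iBit;
    if (uMsr <= UINT32_C(0x00001FFF))                          /* "Low" MSR range. */
    {
        offBase = 0x000;
        iBit    = uMsr;
    }
    else if (uMsr - UINT32_C(0xC0000000) <= UINT32_C(0x1FFF))  /* "High" MSR range. */
    {
        offBase = 0x400;
        iBit    = uMsr - UINT32_C(0xC0000000);
    }
    else
        return true;                   /* MSRs outside both ranges always cause VM-exits. */

    if (fWrite)
        offBase += 0x800;              /* Write bitmaps follow the two read bitmaps. */

    return (pbBitmap[offBase + (iBit / 8)] >> (iBit % 8)) & 1;
}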
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r49478 r49520 64 64 /** Use the function table. */ 65 65 #define HMVMX_USE_FUNCTION_TABLE 66 67 /**68 * The maximum number of MSRs we are willing to swap during a world-switch.69 * Intel claims 512/check capability MSR, we don't want to do anywhere close70 * to that. See Intel spec. 24.7.2 "VM-Exit Controls for MSRs"71 *72 * Bump this count as and when required, there's no backward compatibility73 * requirement.74 */75 #define HMVMX_MAX_SWAP_MSR_COUNT 576 66 77 67 /** Determine which tagged-TLB flush handler to use. */ … … 95 85 #define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10) 96 86 #define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11) 97 #define HMVMX_UPDATED_GUEST_FS_BASE_MSR RT_BIT(12) 98 #define HMVMX_UPDATED_GUEST_GS_BASE_MSR RT_BIT(13) 99 #define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(14) 100 #define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(15) 101 #define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(16) 102 #define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(17) 103 #define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(18) 104 #define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19) 87 #define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(12) 88 #define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(13) 89 #define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(14) 90 #define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(15) 91 #define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(16) 92 #define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(17) 105 93 #define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \ 106 94 | HMVMX_UPDATED_GUEST_RSP \ … … 115 103 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \ 116 104 | HMVMX_UPDATED_GUEST_DEBUG \ 117 | HMVMX_UPDATED_GUEST_FS_BASE_MSR \118 | HMVMX_UPDATED_GUEST_GS_BASE_MSR \119 105 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \ 120 106 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \ … … 309 295 VMXMSREXIT_PASSTHRU_READ 310 296 } VMXMSREXITREAD; 297 /** Pointer to MSR-bitmap read permissions. */ 298 typedef VMXMSREXITREAD* PVMXMSREXITREAD; 311 299 312 300 /** … … 320 308 VMXMSREXIT_PASSTHRU_WRITE 321 309 } VMXMSREXITWRITE; 310 /** Pointer to MSR-bitmap write permissions. */ 311 typedef VMXMSREXITWRITE* PVMXMSREXITWRITE; 312 322 313 323 314 /** … … 345 336 static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush); 346 337 static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr); 347 static void hmR0VmxClearEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx);348 338 static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr, 349 339 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntState); … … 413 403 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient); 414 404 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient); 405 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS 415 406 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient); 407 #endif 416 408 static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 417 409 … … 589 581 * 590 582 * @returns VBox status code. 591 * @param pVCpu Pointer to the VMCPU.592 583 * @param pVmxTransient Pointer to the VMX transient structure. 593 584 * 594 585 * @remarks No-long-jump zone!!! 
595 586 */ 596 DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVM CPU pVCpu, PVMXTRANSIENT pVmxTransient)587 DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient) 597 588 { 598 589 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr); … … 607 598 * 608 599 * @returns VBox status code. 609 * @param pVCpu Pointer to the VMCPU.610 600 * @param pVmxTransient Pointer to the VMX transient structure. 611 601 */ 612 DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVM CPU pVCpu, PVMXTRANSIENT pVmxTransient)602 DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient) 613 603 { 614 604 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO)) … … 627 617 * 628 618 * @returns VBox status code. 629 * @param pVCpu Pointer to the VMCPU.630 619 * @param pVmxTransient Pointer to the VMX transient structure. 631 620 */ 632 DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVM CPU pVCpu, PVMXTRANSIENT pVmxTransient)621 DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient) 633 622 { 634 623 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE)) … … 650 639 * @param pVmxTransient Pointer to the VMX transient structure. 651 640 */ 652 DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVM CPU pVCpu, PVMXTRANSIENT pVmxTransient)641 DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient) 653 642 { 654 643 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN)) … … 667 656 * 668 657 * @returns VBox status code. 669 * @param pVCpu The cross context per CPU structure.670 658 * @param pVmxTransient Pointer to the VMX transient structure. 671 659 */ 672 DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVM CPU pVCpu, PVMXTRANSIENT pVmxTransient)660 DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient) 673 661 { 674 662 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO)) … … 686 674 * 687 675 * @returns VBox status code. 688 * @param pVCpu Pointer to the VMCPU.689 676 * @param pVmxTransient Pointer to the VMX transient structure. 690 677 */ 691 DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVM CPU pVCpu, PVMXTRANSIENT pVmxTransient)678 DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMXTRANSIENT pVmxTransient) 692 679 { 693 680 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION)) … … 1163 1150 1164 1151 1152 #ifdef VBOX_STRICT 1153 /** 1154 * Gets the permission bits for the specified MSR in the MSR bitmap. 1155 * 1156 * @returns VBox status code. 1157 * @retval VINF_SUCCESS if the specified MSR is found. 1158 * @retval VERR_NOT_FOUND if the specified MSR is not found. 1159 * @retval VERR_NOT_SUPPORTED if VT-x doesn't allow the MSR. 1160 * 1161 * @param pVCpu Pointer to the VMCPU. 1162 * @param uMsr The MSR. 1163 * @param penmRead Where to store the read permissions. 1164 * @param penmWrite Where to store the write permissions. 1165 */ 1166 static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite) 1167 { 1168 AssertPtrReturn(penmRead, VERR_INVALID_PARAMETER); 1169 AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER); 1170 int32_t iBit; 1171 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap; 1172 1173 /* See hmR0VmxSetMsrPermission() for the layout. 
*/ 1174 if (uMsr <= 0x00001FFF) 1175 iBit = uMsr; 1176 else if ( uMsr >= 0xC0000000 1177 && uMsr <= 0xC0001FFF) 1178 { 1179 iBit = (uMsr - 0xC0000000); 1180 pbMsrBitmap += 0x400; 1181 } 1182 else 1183 { 1184 AssertMsgFailed(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr)); 1185 return VERR_NOT_SUPPORTED; 1186 } 1187 1188 Assert(iBit <= 0x1fff); 1189 if (ASMBitTest(pbMsrBitmap, iBit)) 1190 *penmRead = VMXMSREXIT_INTERCEPT_READ; 1191 else 1192 *penmRead = VMXMSREXIT_PASSTHRU_READ; 1193 1194 if (ASMBitTest(pbMsrBitmap + 0x800, iBit)) 1195 *penmWrite = VMXMSREXIT_INTERCEPT_WRITE; 1196 else 1197 *penmWrite = VMXMSREXIT_PASSTHRU_WRITE; 1198 return VINF_SUCCESS; 1199 } 1200 #endif /* VBOX_STRICT */ 1201 1202 1165 1203 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 1166 1204 /** … … 1174 1212 DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs) 1175 1213 { 1176 /* Update the VCPU's copy of the guest MSR count. */ 1177 pVCpu->hm.s.vmx.cGuestMsrs = cMsrs; 1214 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */ 1215 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc); 1216 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs)) 1217 { 1218 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs)); 1219 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE; 1220 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 1221 } 1178 1222 1179 1223 /* Update number of guest MSRs to load/store across the world-switch. */ … … 1183 1227 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */ 1184 1228 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc); 1229 1230 /* Update the VCPU's copy of the MSR count. */ 1231 pVCpu->hm.s.vmx.cMsrs = cMsrs; 1232 1185 1233 return VINF_SUCCESS; 1186 1234 } … … 1188 1236 1189 1237 /** 1190 * Adds a guest/host MSR pair to be swapped during the world-switch as 1191 * part of the auto-load/store MSR area in the VMCS. 1238 * Adds a new (or updates the value of an existing) guest/host MSR 1239 * pair to be swapped during the world-switch as part of the 1240 * auto-load/store MSR area in the VMCS. 1241 * 1242 * @returns VBox status code. 1243 * @param pVCpu Pointer to the VMCPU. 1244 * @param uMsr The MSR. 1245 * @param uGuestMsr Value of the guest MSR. 1246 * @param fUpdateHostMsr Whether to update the value of the host MSR if 1247 * necessary. 1248 */ 1249 static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr) 1250 { 1251 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1252 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs; 1253 uint32_t i; 1254 for (i = 0; i < cMsrs; i++) 1255 { 1256 if (pGuestMsr->u32Msr == uMsr) 1257 break; 1258 pGuestMsr++; 1259 } 1260 1261 bool fAdded = false; 1262 if (i == cMsrs) 1263 { 1264 ++cMsrs; 1265 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs); 1266 AssertRCReturn(rc, rc); 1267 1268 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */ 1269 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 1270 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE); 1271 1272 fAdded = true; 1273 } 1274 1275 /* Update the MSR values in the auto-load/store MSR area. 
*/ 1276 pGuestMsr->u32Msr = uMsr; 1277 pGuestMsr->u64Value = uGuestMsrValue; 1278 1279 /* Create/update the MSR slot in the host MSR area. */ 1280 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1281 pHostMsr += i; 1282 pHostMsr->u32Msr = uMsr; 1283 1284 /* 1285 * Update the host MSR only when requested by the called AND when we're 1286 * adding it to the auto-load/store area. Otherwise, it would have been 1287 * updated by hmR0VmxSaveHostMsrs(). We do this for performance reasons. 1288 */ 1289 if ( fAdded 1290 && fUpdateHostMsr) 1291 { 1292 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr); 1293 } 1294 1295 return VINF_SUCCESS; 1296 } 1297 1298 1299 /** 1300 * Removes a guest/shost MSR pair to be swapped during the world-switch from the 1301 * auto-load/store MSR area in the VMCS. 1302 * 1303 * Does not fail if the MSR in @a uMsr is not found in the auto-load/store MSR 1304 * area. 1192 1305 * 1193 1306 * @returns VBox status code. 1194 1307 * @param pVCpu Pointer to the VMCPU. 1195 1308 * @param uMsr The MSR. 1196 * @param uGuestMsr Value of the guest MSR.1197 * @param uHostMsr Value of the host MSR.1198 */1199 static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, uint64_t uHostMsrValue)1200 {1201 AssertMsg(HMVMX_MAX_SWAP_MSR_COUNT < MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc),1202 ("MSR swap count exceeded. Cpu reports %#RX32, our limit %#RX32\n",1203 MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc), HMVMX_MAX_SWAP_MSR_COUNT));1204 1205 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;1206 uint32_t cGuestMsrs = pVCpu->hm.s.vmx.cGuestMsrs;1207 uint32_t i;1208 for (i = 0; i < cGuestMsrs; i++)1209 {1210 if (pGuestMsr->u32Msr == uMsr)1211 break;1212 pGuestMsr++;1213 }1214 1215 AssertReturn(i < HMVMX_MAX_SWAP_MSR_COUNT, VERR_HM_MSR_SWAP_COUNT_EXCEEDED);1216 if (i == cGuestMsrs)1217 {1218 ++cGuestMsrs;1219 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */1220 if (RT_UNLIKELY(cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc)))1221 {1222 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));1223 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;1224 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;1225 }1226 1227 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cGuestMsrs);1228 AssertRCReturn(rc, rc);1229 }1230 1231 /* Update the MSR values in the auto-load/store MSR area. */1232 pGuestMsr->u32Msr = uMsr;1233 pGuestMsr->u64Value = uGuestMsrValue;1234 1235 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;1236 pHostMsr += i;1237 pHostMsr->u32Msr = uMsr;1238 pHostMsr->u64Value = uHostMsrValue;1239 1240 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. 
*/1241 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS);1242 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);1243 return VINF_SUCCESS;1244 }1245 1246 1247 /**1248 * Removes a guest/shost MSR pair to be swapped during the world-switch from the1249 * auto-load/store MSR area in the VMCS.1250 *1251 * Does not fail if the MSR in @a uMsr is not found in the auto-load/store MSR1252 * area.1253 *1254 * @returns VBox status code.1255 * @param pVCpu Pointer to the VMCPU.1256 * @param uMsr The MSR.1257 1309 */ 1258 1310 static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr) 1259 1311 { 1260 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1261 uint32_t cGuestMsrs = pVCpu->hm.s.vmx.cGuestMsrs; 1262 uint32_t i; 1263 for (i = 0; i < cGuestMsrs; i++) 1312 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1313 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs; 1314 for (uint32_t i = 0; i < cMsrs; i++) 1264 1315 { 1265 1316 /* Find the MSR. */ … … 1267 1318 { 1268 1319 /* If it's the last MSR, simply reduce the count. */ 1269 if (i == c GuestMsrs - 1)1320 if (i == cMsrs - 1) 1270 1321 { 1271 --c GuestMsrs;1322 --cMsrs; 1272 1323 break; 1273 1324 } … … 1275 1326 /* Remove it by swapping the last MSR in place of it, and reducing the count. */ 1276 1327 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1277 pLastGuestMsr += c GuestMsrs;1328 pLastGuestMsr += cMsrs; 1278 1329 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr; 1279 1330 pGuestMsr->u64Value = pLastGuestMsr->u64Value; … … 1281 1332 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1282 1333 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1283 pLastHostMsr += c GuestMsrs;1334 pLastHostMsr += cMsrs; 1284 1335 pHostMsr->u32Msr = pLastHostMsr->u32Msr; 1285 1336 pHostMsr->u64Value = pLastHostMsr->u64Value; 1286 --c GuestMsrs;1337 --cMsrs; 1287 1338 break; 1288 1339 } … … 1291 1342 1292 1343 /* Update the VMCS if the count changed (meaning the MSR was found). */ 1293 if (c GuestMsrs != pVCpu->hm.s.vmx.cGuestMsrs)1294 { 1295 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, c GuestMsrs);1344 if (cMsrs != pVCpu->hm.s.vmx.cMsrs) 1345 { 1346 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs); 1296 1347 AssertRCReturn(rc, rc); 1297 } 1298 1299 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */ 1300 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE); 1348 1349 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */ 1350 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 1351 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE); 1352 } 1353 1301 1354 return VINF_SUCCESS; 1302 1355 } … … 1304 1357 1305 1358 /** 1306 * Updates the value of a host MSR in the auto-load/store area in the VMCS. 1307 * 1308 * @returns VBox status code. 1309 * @param pVCpu Pointer to the VMCPU. 1310 * @param uMsr The MSR. 
1311 */ 1312 static int hmR0VmxUpdateAutoLoadStoreHostMsr(PVMCPU pVCpu, uint32_t uMsr) 1313 { 1314 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1315 uint32_t cMsrs = pVCpu->hm.s.vmx.cGuestMsrs; 1316 1317 for (uint32_t i = 0; i < cMsrs; i++) 1318 { 1319 if (pHostMsr->u32Msr == uMsr) 1320 { 1321 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr); 1322 return VINF_SUCCESS; 1323 } 1324 } 1325 1326 return VERR_NOT_FOUND; 1327 } 1328 1359 * Checks if the specified guest MSR is part of the auto-load/store area in 1360 * the VMCS. 1361 * 1362 * @returns true if found, false otherwise. 1363 * @param pVCpu Pointer to the VMCPU. 1364 * @param uMsr The MSR to find. 1365 */ 1366 static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr) 1367 { 1368 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1369 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs; 1370 1371 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++) 1372 { 1373 if (pGuestMsr->u32Msr == uMsr) 1374 return true; 1375 } 1376 return false; 1377 } 1378 1379 1380 /** 1381 * Updates the value of all host MSRs in the auto-load/store area in the VMCS. 1382 * 1383 * @param pVCpu Pointer to the VMCPU. 1384 */ 1385 static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu) 1386 { 1387 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1388 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1389 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1390 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs; 1391 1392 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++) 1393 { 1394 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr); 1395 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr); 1396 } 1397 1398 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true; 1399 } 1400 1401 1402 #ifdef VBOX_STRICT 1403 /** 1404 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the 1405 * VMCS are correct. 1406 * 1407 * @param pVCpu Pointer to the VMCPU. 1408 */ 1409 static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu) 1410 { 1411 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1412 1413 /* Verify MSR counts in the VMCS are what we think it should be. */ 1414 uint32_t cMsrs; 1415 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc); 1416 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs); 1417 1418 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc); 1419 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs); 1420 1421 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc); 1422 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs); 1423 1424 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1425 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1426 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++) 1427 { 1428 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */ 1429 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32\n", pHostMsr->u32Msr, 1430 pGuestMsr->u32Msr)); 1431 1432 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr); 1433 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64\n", pHostMsr->u32Msr, 1434 pHostMsr->u64Value, u64Msr)); 1435 1436 /* Verify that the permissions are as expected in the MSR bitmap. 
*/ 1437 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 1438 { 1439 VMXMSREXITREAD enmRead; 1440 VMXMSREXITWRITE enmWrite; 1441 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite); 1442 AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission! failed. rc=%Rrc\n", rc)); 1443 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 No passthru read permission!\n", 1444 pGuestMsr->u32Msr)); 1445 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 No passthru write permission!\n", 1446 pGuestMsr->u32Msr)); 1447 } 1448 } 1449 } 1450 # endif /* VBOX_STRICT */ 1329 1451 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 1330 1452 … … 1381 1503 static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr) 1382 1504 { 1505 NOREF(pVM); 1383 1506 AssertPtr(pVM); 1384 1507 Assert(pVM->hm.s.vmx.fVpid); … … 1464 1587 VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys) 1465 1588 { 1589 NOREF(pVM); NOREF(GCPhys); 1466 1590 LogFlowFunc(("%RGp\n", GCPhys)); 1467 1591 … … 2046 2170 /* 2047 2171 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored 2048 * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).2172 * automatically as dedicated fields in the VMCS. 2049 2173 */ 2050 2174 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE); 2051 2175 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE); 2052 2176 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE); 2053 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2054 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2055 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2056 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2057 2177 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE); 2058 2178 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE); … … 2120 2240 2121 2241 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 2122 {2123 2242 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. 
*/ 2124 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)2125 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2126 }2127 2243 2128 2244 if ((val & zap) != val) … … 2160 2276 static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu) 2161 2277 { 2278 NOREF(pVM); 2162 2279 AssertPtr(pVM); 2163 2280 AssertPtr(pVCpu); … … 2429 2546 DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu) 2430 2547 { 2548 NOREF(pVM); NOREF(pVCpu); 2549 2431 2550 RTCCUINTREG uReg = ASMGetCR0(); 2432 2551 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg); … … 2489 2608 DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu) 2490 2609 { 2610 NOREF(pVM); 2491 2611 int rc = VERR_INTERNAL_ERROR_5; 2492 2612 … … 2714 2834 DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu) 2715 2835 { 2836 NOREF(pVM); 2837 2716 2838 AssertPtr(pVCpu); 2717 2839 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr); … … 2719 2841 int rc = VINF_SUCCESS; 2720 2842 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 2843 #if 0 2721 2844 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 2722 2845 uint32_t cHostMsrs = 0; … … 2762 2885 if (CPUMIsGuestInLongMode(pVCpu)) 2763 2886 { 2764 /* Must match the EFER value in our 64 bitsswitcher. */2887 /* Must match the EFER value in our 64-bit switcher. */ 2765 2888 pHostMsr->u64Value = u64HostEfer | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE; 2766 2889 } … … 2812 2935 2813 2936 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs); 2937 #endif 2938 2939 if (pVCpu->hm.s.vmx.cMsrs > 0) 2940 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu); 2814 2941 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 2815 2942 … … 2923 3050 DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 2924 3051 { 3052 NOREF(pMixedCtx); 3053 2925 3054 int rc = VINF_SUCCESS; 2926 3055 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS)) … … 2992 3121 DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 2993 3122 { 3123 NOREF(pMixedCtx); 3124 2994 3125 int rc = VINF_SUCCESS; 2995 3126 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE)) … … 3087 3218 static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState) 3088 3219 { 3220 NOREF(pVCpu); 3089 3221 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */ 3090 3222 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */ … … 3538 3670 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */ 3539 3671 u32GuestCR4 |= X86_CR4_PSE; 3540 /* Our identity mapping is a 32 bitspage directory. */3672 /* Our identity mapping is a 32-bit page directory. */ 3541 3673 u32GuestCR4 &= ~X86_CR4_PAE; 3542 3674 } … … 3953 4085 * @param idxAccess Index of the access rights of the segment in the VMCS. 3954 4086 * @param pSelReg Pointer to the segment selector. 3955 * @param pCtx Pointer to the guest-CPU context.3956 4087 * 3957 4088 * @remarks No-long-jump zone!!! 3958 4089 */ 3959 4090 static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, 3960 uint32_t idxAccess, PCPUMSELREG pSelReg , PCPUMCTX pCtx)4091 uint32_t idxAccess, PCPUMSELREG pSelReg) 3961 4092 { 3962 4093 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. 
*/ … … 4049 4180 #endif 4050 4181 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE, 4051 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs , pMixedCtx);4182 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs); 4052 4183 AssertRCReturn(rc, rc); 4053 4184 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE, 4054 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss , pMixedCtx);4185 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss); 4055 4186 AssertRCReturn(rc, rc); 4056 4187 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE, 4057 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds , pMixedCtx);4188 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds); 4058 4189 AssertRCReturn(rc, rc); 4059 4190 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE, 4060 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es , pMixedCtx);4191 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es); 4061 4192 AssertRCReturn(rc, rc); 4062 4193 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE, 4063 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs , pMixedCtx);4194 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs); 4064 4195 AssertRCReturn(rc, rc); 4065 4196 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE, 4066 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs , pMixedCtx);4197 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs); 4067 4198 AssertRCReturn(rc, rc); 4068 4199 … … 4217 4348 * areas. These MSRs will automatically be loaded to the host CPU on every 4218 4349 * successful VM entry and stored from the host CPU on every successful VM exit. 4350 * 4351 * This also creates/updates MSR slots for the host MSRs. The actual host 4352 * MSR values are -not- updated here for performance reasons. See 4353 * hmR0VmxSaveHostMsrs(). 4354 * 4219 4355 * Also loads the sysenter MSRs into the guest-state area in the VMCS. 4220 4356 * … … 4233 4369 4234 4370 /* 4235 * MSRs covered by Auto-load/store: EFER, LSTAR, STAR, SF_MASK, TSC_AUX (RDTSCP).4371 * Shared MSRs that we use the auto-load/store MSR area in the VMCS. 4236 4372 */ 4237 4373 int rc = VINF_SUCCESS; … … 4239 4375 { 4240 4376 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 4241 PVM pVM = pVCpu->CTX_SUFF(pVM);4242 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;4243 uint32_t cGuestMsrs = 0;4244 4245 4377 /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */ 4246 /** @todo r=ramshankar: Optimize this further to do lazy restoration and only 4247 * when the guest really is in 64-bit mode. 
*/ 4378 PVM pVM = pVCpu->CTX_SUFF(pVM); 4248 4379 bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE); 4249 4380 if (fSupportsLongMode) 4250 4381 { 4251 pGuestMsr->u32Msr = MSR_K8_LSTAR; 4252 pGuestMsr->u32Reserved = 0; 4253 pGuestMsr->u64Value = pMixedCtx->msrLSTAR; /* 64 bits mode syscall rip */ 4254 pGuestMsr++; cGuestMsrs++; 4255 pGuestMsr->u32Msr = MSR_K6_STAR; 4256 pGuestMsr->u32Reserved = 0; 4257 pGuestMsr->u64Value = pMixedCtx->msrSTAR; /* legacy syscall eip, cs & ss */ 4258 pGuestMsr++; cGuestMsrs++; 4259 pGuestMsr->u32Msr = MSR_K8_SF_MASK; 4260 pGuestMsr->u32Reserved = 0; 4261 pGuestMsr->u64Value = pMixedCtx->msrSFMASK; /* syscall flag mask */ 4262 pGuestMsr++; cGuestMsrs++; 4263 pGuestMsr->u32Msr = MSR_K8_KERNEL_GS_BASE; 4264 pGuestMsr->u32Reserved = 0; 4265 pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */ 4266 pGuestMsr++; cGuestMsrs++; 4267 } 4268 4269 /* 4270 * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to 4271 * load the guest's copy always (since the MSR bitmap allows passthru unconditionally). 4272 */ 4273 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 4274 { 4275 pGuestMsr->u32Msr = MSR_K8_TSC_AUX; 4276 pGuestMsr->u32Reserved = 0; 4277 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value); 4278 AssertRCReturn(rc, rc); 4279 pGuestMsr++; cGuestMsrs++; 4280 } 4281 4282 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */ 4283 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc)) 4284 { 4285 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs)); 4286 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE; 4287 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 4288 } 4289 4290 /* Update the VCPU's copy of the guest MSR count. */ 4291 pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs; 4292 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs); AssertRCReturn(rc, rc); 4293 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs); AssertRCReturn(rc, rc); 4382 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false /* fUpdateHostMsr */); 4383 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false /* fUpdateHostMsr */); 4384 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false /* fUpdateHostMsr */); 4385 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */); 4386 } 4387 4388 # ifdef DEBUG 4389 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 4390 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++) 4391 Log4(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value)); 4392 # endif 4294 4393 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 4295 4394 … … 4337 4436 static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx) 4338 4437 { 4438 NOREF(pCtx); 4339 4439 /** @todo See if we can make use of other states, e.g. 4340 4440 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. 
*/ … … 4455 4555 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason); 4456 4556 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError); 4457 rc |= hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);4557 rc |= hmR0VmxReadExitQualificationVmcs(pVmxTransient); 4458 4558 AssertRC(rc); 4459 4559 … … 4634 4734 break; 4635 4735 } 4636 NOREF(pVM); 4736 NOREF(pVM); NOREF(pCtx); 4637 4737 } 4638 4738 … … 4765 4865 4766 4866 /** 4767 * Prepares for and executes VMLAUNCH (64 bitsguests) for 32-bit hosts4867 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts 4768 4868 * supporting 64-bit guests. 4769 4869 * … … 4940 5040 * @param pVCpu Pointer to the VMCPU. 4941 5041 * @param idxField The VMCS field encoding. 4942 * @param u64Val 16, 32 or 64 bitsvalue.5042 * @param u64Val 16, 32 or 64-bit value. 4943 5043 */ 4944 5044 VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val) … … 5037 5137 * @param pVCpu Pointer to the VMCPU. 5038 5138 * @param idxField The VMCS field encoding. 5039 * @param u64Val 16, 32 or 64 bitsvalue.5139 * @param u64Val 16, 32 or 64-bit value. 5040 5140 */ 5041 5141 VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val) … … 5113 5213 * @returns VBox status code. 5114 5214 * @param pVCpu Pointer to the VMCPU. 5115 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 5116 * out-of-sync. Make sure to update the required fields 5117 * before using them. 5215 * 5118 5216 * @remarks No-long-jump zone!!! 5119 5217 */ 5120 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu , PCPUMCTX pMixedCtx)5218 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu) 5121 5219 { 5122 5220 int rc = VERR_INTERNAL_ERROR_5; … … 5234 5332 DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 5235 5333 { 5334 NOREF(pMixedCtx); 5236 5335 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID; 5237 5336 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 5265 5364 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo)) 5266 5365 { 5267 rc = hmR0VmxReadExitIntInfoVmcs(pV Cpu, pVmxTransient);5366 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 5268 5367 AssertRCReturn(rc, rc); 5269 5368 … … 5403 5502 static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 5404 5503 { 5504 NOREF(pMixedCtx); 5505 5405 5506 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0)) 5406 5507 { … … 5434 5535 static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 5435 5536 { 5537 NOREF(pMixedCtx); 5538 5436 5539 int rc = VINF_SUCCESS; 5437 5540 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4)) … … 5601 5704 static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 5602 5705 { 5706 NOREF(pMixedCtx); 5603 5707 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */ 5604 5708 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE; … … 5648 5752 5649 5753 /** 5650 * Saves the guest FS_BASE MSRs from the current VMCS into the guest-CPU 5651 * context. 5754 * Saves the auto load/store'd guest MSRs from the current VMCS into the 5755 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE 5756 * and TSC_AUX. 5652 5757 * 5653 5758 * @returns VBox status code. 
… … 5659 5764 * @remarks No-long-jump zone!!! 5660 5765 */ 5661 static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)5662 {5663 int rc = VINF_SUCCESS;5664 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))5665 {5666 uint64_t u64Val = 0;5667 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &u64Val); AssertRCReturn(rc, rc);5668 pMixedCtx->fs.u64Base = u64Val;5669 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;5670 }5671 return rc;5672 }5673 5674 5675 /**5676 * Saves the guest GS_BASE MSRs from the current VMCS into the guest-CPU5677 * context.5678 *5679 * @returns VBox status code.5680 * @param pVCpu Pointer to the VMCPU.5681 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe5682 * out-of-sync. Make sure to update the required fields5683 * before using them.5684 *5685 * @remarks No-long-jump zone!!!5686 */5687 static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)5688 {5689 int rc = VINF_SUCCESS;5690 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))5691 {5692 uint64_t u64Val = 0;5693 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &u64Val); AssertRCReturn(rc, rc);5694 pMixedCtx->gs.u64Base = u64Val;5695 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;5696 }5697 return rc;5698 }5699 5700 5701 /**5702 * Saves the auto load/store'd guest MSRs from the current VMCS into the5703 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE5704 * and TSC_AUX.5705 *5706 * @returns VBox status code.5707 * @param pVCpu Pointer to the VMCPU.5708 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe5709 * out-of-sync. Make sure to update the required fields5710 * before using them.5711 *5712 * @remarks No-long-jump zone!!!5713 */5714 5766 static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 5715 5767 { … … 5718 5770 5719 5771 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 5720 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)5721 {5722 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;5723 pMsr += i;5772 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 5773 Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", pVCpu->hm.s.vmx.cMsrs)); 5774 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++) 5775 { 5724 5776 switch (pMsr->u32Msr) 5725 5777 { 5726 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break; 5727 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break; 5728 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break; 5778 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break; 5779 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break; 5780 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break; 5781 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break; 5729 5782 case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break; 5730 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;5731 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;5732 5783 default: 5733 5784 { … … 5867 5918 PCPUMSELREG pSelReg) 5868 5919 { 5920 NOREF(pVCpu); 5921 5869 5922 uint32_t u32Val = 0; 5870 5923 int rc = VMXReadVmcs32(idxSel, &u32Val); … … 6086 6139 static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6087 6140 { 6141 NOREF(pMixedCtx); 6142 6088 6143 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. 
*/ 6089 6144 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE; … … 6135 6190 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx); 6136 6191 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 6137 6138 rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);6139 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);6140 6141 rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);6142 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);6143 6192 6144 6193 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); … … 6901 6950 6902 6951 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET); 6952 NOREF(fBlockMovSS); NOREF(fBlockSti); 6903 6953 return rc; 6904 6954 } … … 6915 6965 DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6916 6966 { 6967 NOREF(pMixedCtx); 6917 6968 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID; 6918 6969 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */); … … 6949 7000 DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6950 7001 { 7002 NOREF(pMixedCtx); 6951 7003 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID; 6952 7004 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 6967 7019 DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr) 6968 7020 { 7021 NOREF(pMixedCtx); 6969 7022 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID; 6970 7023 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 7008 7061 DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr) 7009 7062 { 7063 NOREF(pMixedCtx); 7010 7064 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID; 7011 7065 if ( uVector == X86_XCPT_BP … … 7306 7360 Assert(pVM->hm.s.vmx.fSupported); 7307 7361 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 7308 NOREF(pCpu); 7362 NOREF(pCpu); NOREF(pVM); 7309 7363 7310 7364 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu)); … … 7347 7401 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit) 7348 7402 { 7403 NOREF(fGlobalInit); 7404 7349 7405 switch (enmEvent) 7350 7406 { … … 7551 7607 7552 7608 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx); 7553 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoad GuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);7609 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadSharedMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 7554 7610 7555 7611 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx); … … 7588 7644 static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 7589 7645 { 7646 NOREF(pVM); 7647 7590 7648 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 7591 7649 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); … … 7841 7899 7842 7900 /* 7901 * The host MSR values the very first time around won't be updated, so we need to 7902 * fill those values in. Subsequently, it's updated as part of the host state. 
7903 */ 7904 if (!pVCpu->hm.s.vmx.fUpdatedHostMsrs) 7905 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT); 7906 7907 /* 7843 7908 * Load the host state bits as we may've been preempted (only happens when 7844 7909 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM). … … 7886 7951 || idCurrentCpu != pVCpu->hm.s.idLastCpu) 7887 7952 { 7888 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu , pMixedCtx);7953 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu); 7889 7954 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false; 7890 7955 } … … 7900 7965 to start executing. */ 7901 7966 7902 #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 7967 /** @todo Get rid of VBOX_WITH_AUTO_MSR_LOAD_RESTORE define. */ 7968 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 7969 /* 7970 * Load the TSC_AUX MSR when we are not intercepting RDTSCP. 7971 */ 7972 uint64_t uGuestTscAuxMsr; 7973 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 7974 { 7975 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT)) 7976 { 7977 int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &uGuestTscAuxMsr); 7978 AssertRC(rc2); 7979 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, uGuestTscAuxMsr, true /* fUpdateHostMsr */); 7980 } 7981 else 7982 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX); 7983 } 7984 #ifdef VBOX_STRICT 7985 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu); 7986 #endif 7987 #else 7903 7988 /* 7904 7989 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that 7905 7990 * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}. 7906 7991 */ 7907 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 7992 if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 7993 && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT)) 7908 7994 { 7909 7995 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX); 7910 uint64_t u64HostTscAux = 0; 7911 int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux); 7996 int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAuxMsr); 7912 7997 AssertRC(rc2); 7913 ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux );7998 ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAuxMsr); 7914 7999 } 7915 8000 #endif … … 7936 8021 static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun) 7937 8022 { 8023 NOREF(pVM); 8024 7938 8025 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 7939 8026 … … 7946 8033 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT)) 7947 8034 { 7948 #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE7949 /* Restore host's TSC_AUX. */7950 8035 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 8036 { 8037 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 8038 /* VT-x restored the host TSC_AUX MSR for us, update the guest value from the VMCS area 8039 if it could have changed without causing a VM-exit. */ 8040 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 8041 { 8042 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 8043 AssertRC(rc2); 8044 } 8045 #else 8046 /* Update guest's TSC_AUX if it could have changed. */ 8047 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 8048 { 8049 uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX); 8050 CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, u64GuestTscAuxMsr); 8051 } 8052 /* Restore host's TSC_AUX. 
*/ 7951 8053 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux); 7952 8054 #endif 8055 } 8056 7953 8057 /** @todo Find a way to fix hardcoding a guestimate. */ 7954 8058 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() … … 8237 8341 DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason) 8238 8342 { 8343 #ifdef DEBUG_ramshankar 8344 # define SVVMCS() do { int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); } while (0) 8345 # define LDVMCS() do { VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0) 8346 #endif 8239 8347 int rc; 8240 8348 switch (rcReason) 8241 8349 { 8242 case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient);break;8243 case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient);break;8244 case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient);break;8245 case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient);break;8246 case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient);break;8247 case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient);break;8248 case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient);break;8249 case VMX_EXIT_XCPT_OR_NMI: rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);break;8250 case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient);break;8251 case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient);break;8252 case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient);break;8253 case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient);break;8254 case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient);break;8255 case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient);break;8256 case VMX_EXIT_PREEMPT_TIMER: rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient);break;8257 case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient);break;8258 case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient);break;8259 case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient);break;8260 case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient);break;8261 case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient);break;8262 case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient);break;8263 case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient);break;8264 case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient);break;8265 case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);break;8266 case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient);break;8267 case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient);break;8268 case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient);break;8269 case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient);break;8270 case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient);break;8271 case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient);break;8272 case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient);break;8273 case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVCpu, 
pMixedCtx, pVmxTransient);break;8274 case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient);break;8350 case VMX_EXIT_EPT_MISCONFIG: /* SVVMCS(); */ rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8351 case VMX_EXIT_EPT_VIOLATION: /* SVVMCS(); */ rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8352 case VMX_EXIT_IO_INSTR: /* SVVMCS(); */ rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8353 case VMX_EXIT_CPUID: /* SVVMCS(); */ rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8354 case VMX_EXIT_RDTSC: /* SVVMCS(); */ rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8355 case VMX_EXIT_RDTSCP: /* SVVMCS(); */ rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8356 case VMX_EXIT_APIC_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8357 case VMX_EXIT_XCPT_OR_NMI: /* SVVMCS(); */ rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8358 case VMX_EXIT_MOV_CRX: /* SVVMCS(); */ rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8359 case VMX_EXIT_EXT_INT: /* SVVMCS(); */ rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8360 case VMX_EXIT_INT_WINDOW: /* SVVMCS(); */ rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8361 case VMX_EXIT_MWAIT: /* SVVMCS(); */ rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8362 case VMX_EXIT_MONITOR: /* SVVMCS(); */ rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8363 case VMX_EXIT_TASK_SWITCH: /* SVVMCS(); */ rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8364 case VMX_EXIT_PREEMPT_TIMER: /* SVVMCS(); */ rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8365 case VMX_EXIT_RDMSR: /* SVVMCS(); */ rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8366 case VMX_EXIT_WRMSR: /* SVVMCS(); */ rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8367 case VMX_EXIT_MOV_DRX: /* SVVMCS(); */ rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8368 case VMX_EXIT_TPR_BELOW_THRESHOLD: /* SVVMCS(); */ rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8369 case VMX_EXIT_HLT: /* SVVMCS(); */ rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8370 case VMX_EXIT_INVD: /* SVVMCS(); */ rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8371 case VMX_EXIT_INVLPG: /* SVVMCS(); */ rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8372 case VMX_EXIT_RSM: /* SVVMCS(); */ rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8373 case VMX_EXIT_MTF: /* SVVMCS(); */ rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8374 case VMX_EXIT_PAUSE: /* SVVMCS(); */ rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8375 case VMX_EXIT_XDTR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8376 case VMX_EXIT_TR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8377 case VMX_EXIT_WBINVD: 
/* SVVMCS(); */ rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8378 case VMX_EXIT_XSETBV: /* SVVMCS(); */ rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8379 case VMX_EXIT_RDRAND: /* SVVMCS(); */ rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8380 case VMX_EXIT_INVPCID: /* SVVMCS(); */ rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8381 case VMX_EXIT_GETSEC: /* SVVMCS(); */ rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8382 case VMX_EXIT_RDPMC: /* SVVMCS(); */ rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break; 8275 8383 8276 8384 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break; … … 8338 8446 do { \ 8339 8447 Log4Func(("\n")); \ 8340 } while (0)8448 } while (0) 8341 8449 #else /* Release builds */ 8342 # define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0) 8343 # define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0) 8450 # define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \ 8451 do { \ 8452 HMVMX_STOP_EXIT_DISPATCH_PROF(); \ 8453 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \ 8454 } while (0) 8455 # define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0) 8344 8456 #endif 8345 8457 … … 8359 8471 DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient) 8360 8472 { 8361 int rc = hmR0VmxReadExitInstrLenVmcs(pV Cpu, pVmxTransient);8473 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 8362 8474 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 8363 8475 AssertRCReturn(rc, rc); … … 8387 8499 uError = (err); \ 8388 8500 break; \ 8389 } else do { } while (0)8501 } else do { } while (0) 8390 8502 /* Duplicate of IEM_IS_CANONICAL(). */ 8391 8503 #define HMVMX_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000)) … … 8998 9110 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3); 8999 9111 9000 int rc = hmR0VmxReadExitIntInfoVmcs(pV Cpu, pVmxTransient);9112 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 9001 9113 AssertRCReturn(rc, rc); 9002 9114 … … 9075 9187 Assert(CPUMIsGuestInRealModeEx(pMixedCtx)); 9076 9188 9077 rc = hmR0VmxReadExitInstrLenVmcs(pV Cpu, pVmxTransient);9078 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pV Cpu, pVmxTransient);9189 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 9190 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 9079 9191 AssertRCReturn(rc, rc); 9080 9192 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo), … … 9295 9407 Assert(!pVM->hm.s.fNestedPaging); 9296 9408 9297 int rc = hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);9409 int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient); 9298 9410 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 9299 9411 AssertRCReturn(rc, rc); … … 9387 9499 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment". 9388 9500 */ 9501 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 9389 9502 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); 9390 9503 HMVMX_RETURN_UNEXPECTED_EXIT(); … … 9403 9516 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits" 9404 9517 */ 9518 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 9405 9519 AssertMsgFailed(("Unexpected SMI VM-exit. 
pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); 9406 9520 HMVMX_RETURN_UNEXPECTED_EXIT(); … … 9414 9528 { 9415 9529 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */ 9530 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 9416 9531 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); 9417 9532 HMVMX_RETURN_UNEXPECTED_EXIT(); … … 9429 9544 * See Intel spec. 25.3 "Other Causes of VM-exits". 9430 9545 */ 9546 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 9431 9547 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); 9432 9548 HMVMX_RETURN_UNEXPECTED_EXIT(); … … 9562 9678 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient); 9563 9679 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient); 9564 rc |= hmR0VmxReadEntryInstrLenVmcs(pV Cpu, pVmxTransient);9680 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient); 9565 9681 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState); 9566 9682 AssertRCReturn(rc, rc); … … 9584 9700 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc); 9585 9701 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val)); 9586 #e ndif9587 9588 PVM pVM = pVCpu->CTX_SUFF(pVM); 9589 HMDumpRegs(pVM, pVCpu, pMixedCtx); 9590 9702 #else 9703 NOREF(pVmxTransient); 9704 #endif 9705 9706 HMDumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx); 9591 9707 return VERR_VMX_INVALID_GUEST_STATE; 9592 9708 } … … 9599 9715 HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient) 9600 9716 { 9601 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); 9717 NOREF(pVmxTransient); 9718 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx); 9602 9719 HMVMX_RETURN_UNEXPECTED_EXIT(); 9603 9720 } … … 9610 9727 HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient) 9611 9728 { 9612 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); 9729 NOREF(pVmxTransient); 9730 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx); 9613 9731 HMVMX_RETURN_UNEXPECTED_EXIT(); 9614 9732 } … … 9622 9740 { 9623 9741 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx)); 9742 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); 9624 9743 return VERR_VMX_UNDEFINED_EXIT_CODE; 9625 9744 } … … 9671 9790 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 9672 9791 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 9792 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)) 9793 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 9673 9794 AssertRCReturn(rc, rc); 9674 9795 Log4(("CS:RIP=%04x:%#RX64 ECX=%X\n", pMixedCtx->cs.Sel, pMixedCtx->rip, pMixedCtx->ecx)); 9796 9797 #ifdef VBOX_STRICT 9798 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 9799 && hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)) 9800 { 9801 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. 
ecx=%#RX32\n", pMixedCtx->ecx)); 9802 HMVMX_RETURN_UNEXPECTED_EXIT(); 9803 } 9804 #endif 9675 9805 9676 9806 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 9702 9832 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 9703 9833 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 9704 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 9834 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)) 9835 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 9705 9836 AssertRCReturn(rc, rc); 9706 9837 Log4(("ecx=%#RX32\n", pMixedCtx->ecx)); … … 9723 9854 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE); 9724 9855 } 9725 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */9726 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);9727 9856 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */ 9728 9857 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true; 9729 9858 9730 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */9859 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */ 9731 9860 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)) 9732 9861 { … … 9738 9867 case MSR_K8_FS_BASE: /* no break */ 9739 9868 case MSR_K8_GS_BASE: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break; 9740 case MSR_K8_KERNEL_GS_BASE: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS); break; 9869 default: 9870 { 9871 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)) 9872 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS); 9873 break; 9874 } 9741 9875 } 9742 9876 } … … 9757 9891 } 9758 9892 9759 case MSR_K8_LSTAR: 9760 case MSR_K6_STAR: 9761 case MSR_K8_SF_MASK: 9762 case MSR_K8_TSC_AUX: 9763 case MSR_K8_KERNEL_GS_BASE: 9893 /* Writes to MSRs that are part of the auto-load/store are shouldn't cause VM-exits 9894 when MSR-bitmaps are supported. */ 9895 default: 9764 9896 { 9765 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", 9766 pMixedCtx->ecx)); 9767 HMVMX_RETURN_UNEXPECTED_EXIT(); 9897 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)) 9898 { 9899 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", 9900 pMixedCtx->ecx)); 9901 HMVMX_RETURN_UNEXPECTED_EXIT(); 9902 } 9903 break; 9768 9904 } 9769 9905 } … … 9802 9938 /* 9803 9939 * The TPR has already been updated, see hmR0VMXPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update 9804 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInject Event() and9940 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent() and 9805 9941 * resume guest execution. 
9806 9942 */ … … 9825 9961 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 9826 9962 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2); 9827 int rc = hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);9963 int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient); 9828 9964 AssertRCReturn(rc, rc); 9829 9965 … … 9958 10094 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1); 9959 10095 9960 int rc2 = hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);9961 rc2 |= hmR0VmxReadExitInstrLenVmcs(pV Cpu, pVmxTransient);10096 int rc2 = hmR0VmxReadExitQualificationVmcs(pVmxTransient); 10097 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 9962 10098 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 9963 10099 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */ … … 9997 10133 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo)) 9998 10134 { 9999 rc2 = hmR0VmxReadExitInstrInfoVmcs(pV Cpu, pVmxTransient);10135 rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 10000 10136 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */ 10001 10137 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); … … 10033 10169 #else 10034 10170 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 10035 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL );10171 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */); 10036 10172 if (RT_SUCCESS(rcStrict)) 10037 10173 { … … 10094 10230 } 10095 10231 10096 /* INS & OUTS with REP prefix modify RFLAGS. */ 10232 /* 10233 * INS/OUTS with REP prefix updates RFLAGS, can be observed with triple-fault guru while booting Fedora 17 64-bit guest. 10234 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ. 10235 */ 10097 10236 if (fIOString) 10098 10237 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS); … … 10176 10315 10177 10316 /* Check if this task-switch occurred while delivery an event through the guest IDT. */ 10178 int rc = hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);10317 int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient); 10179 10318 AssertRCReturn(rc, rc); 10180 10319 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT) … … 10261 10400 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 10262 10401 #endif 10263 rc |= hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);10402 rc |= hmR0VmxReadExitQualificationVmcs(pVmxTransient); 10264 10403 AssertRCReturn(rc, rc); 10265 10404 … … 10352 10491 10353 10492 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */ 10354 PVM pVM = pVCpu->CTX_SUFF(pVM);10355 10493 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */); 10356 10494 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32); … … 10360 10498 10361 10499 #ifdef VBOX_WITH_STATISTICS 10362 rc = hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);10500 rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient); 10363 10501 AssertRCReturn(rc, rc); 10364 10502 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE) … … 10375 10513 * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update the segment registers and DR7 from the CPU. 
10376 10514 */ 10377 rc = hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);10515 rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient); 10378 10516 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 10379 10517 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx); … … 10482 10620 RTGCPHYS GCPhys = 0; 10483 10621 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys); 10484 rc |= hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);10622 rc |= hmR0VmxReadExitQualificationVmcs(pVmxTransient); 10485 10623 #if 0 10486 10624 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */ … … 10582 10720 if (rc == VINF_EM_RAW_GUEST_TRAP) 10583 10721 { 10584 rc = hmR0VmxReadExitIntInfoVmcs(pV Cpu, pVmxTransient);10585 rc |= hmR0VmxReadExitInstrLenVmcs(pV Cpu, pVmxTransient);10586 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pV Cpu, pVmxTransient);10722 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 10723 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 10724 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 10587 10725 AssertRCReturn(rc, rc); 10588 10726 … … 10609 10747 * for processing. 10610 10748 */ 10611 int rc = hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);10749 int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient); 10612 10750 AssertRCReturn(rc, rc); 10613 10751 … … 10652 10790 * Raise #DB in the guest. 10653 10791 */ 10654 rc = hmR0VmxReadExitIntInfoVmcs(pV Cpu, pVmxTransient);10655 rc |= hmR0VmxReadExitInstrLenVmcs(pV Cpu, pVmxTransient);10656 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pV Cpu, pVmxTransient);10792 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 10793 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 10794 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 10657 10795 AssertRCReturn(rc, rc); 10658 10796 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), … … 10718 10856 /* Forward #NM to the guest. */ 10719 10857 Assert(rc == VINF_EM_RAW_GUEST_TRAP); 10720 rc = hmR0VmxReadExitIntInfoVmcs(pV Cpu, pVmxTransient);10858 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 10721 10859 AssertRCReturn(rc, rc); 10722 10860 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), … … 10744 10882 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS 10745 10883 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */ 10746 rc = hmR0VmxReadExitIntInfoVmcs(pV Cpu, pVmxTransient);10747 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pV Cpu, pVmxTransient);10748 rc |= hmR0VmxReadExitInstrLenVmcs(pV Cpu, pVmxTransient);10884 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 10885 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 10886 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 10749 10887 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 10750 10888 AssertRCReturn(rc, rc); … … 10757 10895 /* We don't intercept #GP. */ 10758 10896 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n")); 10897 NOREF(pVmxTransient); 10759 10898 return VERR_VMX_UNEXPECTED_EXCEPTION; 10760 10899 #endif … … 10975 11114 10976 11115 11116 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS 10977 11117 /** 10978 11118 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects … … 10988 11128 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in 10989 11129 hmR0VmxCheckExitDueToEventDelivery(). 
*/ 10990 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pV Cpu, pVmxTransient);10991 rc |= hmR0VmxReadExitInstrLenVmcs(pV Cpu, pVmxTransient);11130 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 11131 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 10992 11132 AssertRCReturn(rc, rc); 10993 11133 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO); … … 10997 11137 return VINF_SUCCESS; 10998 11138 } 11139 #endif 10999 11140 11000 11141 … … 11006 11147 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(); 11007 11148 PVM pVM = pVCpu->CTX_SUFF(pVM); 11008 int rc = hmR0VmxReadExitQualificationVmcs(pV Cpu, pVmxTransient);11009 rc |= hmR0VmxReadExitIntInfoVmcs(pV Cpu, pVmxTransient);11010 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pV Cpu, pVmxTransient);11149 int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient); 11150 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 11151 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 11011 11152 AssertRCReturn(rc, rc); 11012 11153 … … 11032 11173 #else 11033 11174 Assert(!pVM->hm.s.fNestedPaging); 11175 NOREF(pVM); 11034 11176 #endif 11035 11177 -
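The reworked RDMSR/WRMSR exits above hinge on whether the accessed MSR already sits in the VMCS auto-load/store area: with MSR bitmaps active such an access should not cause a VM-exit at all (hence the strict-build asserts), and without bitmaps the handlers save or mark that area instead of special-casing individual MSRs. The membership test, hmR0VmxIsAutoLoadStoreGuestMsr(), is defined outside this hunk; the fragment below is only a rough sketch of what such a check amounts to, assuming the area is a flat array of MSR/value pairs, and every name in it is invented for illustration.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical auto-load/store entry layout; not the VirtualBox definition. */
typedef struct AUTOMSRENTRY
{
    uint32_t u32Msr;      /* MSR index, e.g. 0xC0000080 for EFER. */
    uint32_t u32Reserved; /* Assumed reserved/zero in this sketch. */
    uint64_t u64Value;    /* Value loaded on VM-entry / stored on VM-exit. */
} AUTOMSRENTRY;

/* Returns true if uMsr is one of the cMsrs guest MSRs swapped automatically
   during the world switch; a simple linear scan, since the area stays small. */
static bool IsAutoLoadStoreGuestMsr(AUTOMSRENTRY const *paGuestMsrs, uint32_t cMsrs, uint32_t uMsr)
{
    for (uint32_t i = 0; i < cMsrs; i++)
        if (paGuestMsrs[i].u32Msr == uMsr)
            return true;
    return false;
}

In the WRMSR path above this kind of check is what drives the HM_CHANGED_VMX_GUEST_AUTO_MSRS flag when MSR bitmaps are unavailable, so a modified value is written back to the area before the next VM-entry; in the RDMSR path a hit while MSR bitmaps are in use is treated as unexpected.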
trunk/src/VBox/VMM/VMMR3/HM.cpp
r49508 r49520 2923 2923 LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", i, pVCpu->hm.s.vmx.u32EntryCtls)); 2924 2924 LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", i, pVCpu->hm.s.vmx.u32ExitCtls)); 2925 LogRel(("HM: CPU[%u] MSRBitmapPhys%#RHp\n", i, pVCpu->hm.s.vmx.HCPhysMsrBitmap));2925 LogRel(("HM: CPU[%u] HCPhysMsrBitmap %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysMsrBitmap)); 2926 2926 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 2927 LogRel(("HM: CPU[%u] GuestMSRPhys%#RHp\n", i, pVCpu->hm.s.vmx.HCPhysGuestMsr));2928 LogRel(("HM: CPU[%u] H ostMsrPhys%#RHp\n", i, pVCpu->hm.s.vmx.HCPhysHostMsr));2929 LogRel(("HM: CPU[%u] c GuestMSRs %u\n", i, pVCpu->hm.s.vmx.cGuestMsrs));2927 LogRel(("HM: CPU[%u] HCPhysGuestMsr %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysGuestMsr)); 2928 LogRel(("HM: CPU[%u] HCPhysHostMsr %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysHostMsr)); 2929 LogRel(("HM: CPU[%u] cMsrs %u\n", i, pVCpu->hm.s.vmx.cMsrs)); 2930 2930 #endif 2931 2931 } -
trunk/src/VBox/VMM/include/HMInternal.h
r49275 r49520 79 79 * @{ 80 80 */ 81 #define HM_CHANGED_GUEST_CR0 RT_BIT(0) 81 #define HM_CHANGED_GUEST_CR0 RT_BIT(0) /* Shared */ 82 82 #define HM_CHANGED_GUEST_CR3 RT_BIT(1) 83 83 #define HM_CHANGED_GUEST_CR4 RT_BIT(2) … … 87 87 #define HM_CHANGED_GUEST_TR RT_BIT(6) 88 88 #define HM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(7) 89 #define HM_CHANGED_GUEST_DEBUG RT_BIT(8) 89 #define HM_CHANGED_GUEST_DEBUG RT_BIT(8) /* Shared */ 90 90 #define HM_CHANGED_GUEST_RIP RT_BIT(9) 91 91 #define HM_CHANGED_GUEST_RSP RT_BIT(10) … … 605 605 R0PTRTYPE(void *) pvHostMsr; 606 606 607 /** Number of automatically loaded/restored guest MSRs during the world switch. */ 608 uint32_t cGuestMsrs; 609 uint32_t uAlignment; 607 /** Number of guest/host MSR pairs in the auto-load/store area. */ 608 uint32_t cMsrs; 609 /** Whether the host MSR values are up-to-date. */ 610 bool fUpdatedHostMsrs; 611 uint8_t u8Align[7]; 610 612 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 611 613
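Besides the renames, the HMInternal.h hunk above swaps the old cGuestMsrs/uAlignment pair for cMsrs plus an fUpdatedHostMsrs flag and explicit padding bytes, keeping the structure's size independent of compiler-chosen tail padding. The snippet below merely illustrates that pattern with invented names; it is not the VirtualBox layout, and the padding byte count in the changeset differs because it depends on the surrounding members.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: explicit padding next to a bool keeps the layout stable. */
typedef struct MSRAREASTATE
{
    uint32_t cMsrs;            /* Guest/host MSR pairs in the auto-load/store area. */
    bool     fUpdatedHostMsrs; /* Set once the host MSR values have been refreshed. */
    uint8_t  au8Padding[3];    /* Pad explicitly (assumes 1-byte bool; checked below). */
} MSRAREASTATE;

/* Compile-time layout check (C11); update the expected size if fields change. */
_Static_assert(sizeof(MSRAREASTATE) == 8, "MSRAREASTATE layout drifted");

With the explicit array in place, adding another small flag later means shrinking the padding rather than silently changing the structure size.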