Changeset 66581 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Apr 17, 2017 3:00:00 AM (8 years ago)
- svn:sync-xref-src-repo-rev: 114608
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 8 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
(r66227 -> r66581)

@@ -1438,5 +1438,5 @@
     uint64_t uValidatedEfer;
     uint64_t const uOldEfer = pVCpu->cpum.s.Guest.msrEFER;
-    int rc = CPUMGetValidateEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0, uOldEfer, uValue, &uValidatedEfer);
+    int rc = CPUMQueryValidatedGuestEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0, uOldEfer, uValue, &uValidatedEfer);
     if (RT_FAILURE(rc))
         return VERR_CPUM_RAISE_GP_0;

@@ -6114,5 +6114,5 @@
  * this function returns VINF_SUCCESS).
  */
-VMMDECL(int) CPUMGetValidateEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer, uint64_t *puValidEfer)
+VMMDECL(int) CPUMQueryValidatedGuestEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer, uint64_t *puValidEfer)
 {
     uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax >= 0x80000001
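Besides the rename, this changeset adds a second caller of the helper in HMSvmVmrun (see the HMSVMAll.cpp hunk below), so the same validation now serves both guest WRMSR emulation and the EFER field taken from a nested-guest VMCB. A sketch of the two call shapes as they appear in this changeset (surrounding declarations and error handling trimmed):

    /* WRMSR path (CPUMAllMsrs.cpp): validate the value being written against the current EFER. */
    int rc = CPUMQueryValidatedGuestEfer(pVM, pVCpu->cpum.s.Guest.cr0, uOldEfer, uValue, &uValidatedEfer);
    if (RT_FAILURE(rc))
        return VERR_CPUM_RAISE_GP_0;    /* invalid combination -> #GP(0) for the guest */

    /* VMRUN path (HMSVMAll.cpp): validate the EFER taken from the nested-guest VMCB;
       that hunk passes 0 for uOldEfer. */
    rc = CPUMQueryValidatedGuestEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);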
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
(r65792 -> r66581)

@@ -193,4 +193,16 @@
     /** @todo Complete MONITOR implementation. */
     return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks if the monitor hardware is armed / active.
+ *
+ * @returns true if armed, false otherwise.
+ * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
+ */
+VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
+{
+    return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
 }
 
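The new helper exists to support the SVM MWAIT-when-armed intercept; its consumer is in the IEMAllCImpl.cpp.h hunk further down. Shown here as a short sketch so the connection is visible next to the definition:

    /* From iemCImpl_mwait (see IEMAllCImpl.cpp.h below): only #VMEXIT with
       SVM_EXIT_MWAIT_ARMED when the monitor hardware is actually armed. */
    if (   IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
        && EMMonitorIsArmed(pVCpu))
    {
        Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }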
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r66386 r66581 176 176 177 177 178 /** 179 * Converts an SVM event type to a TRPM event type. 180 * 181 * @returns The TRPM event type. 182 * @retval TRPM_32BIT_HACK if the specified type of event isn't among the set 183 * of recognized trap types. 184 * 185 * @param pEvent Pointer to the SVM event. 186 */ 187 VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pEvent) 188 { 189 uint8_t const uType = pEvent->n.u3Type; 190 switch (uType) 191 { 192 case SVM_EVENT_EXTERNAL_IRQ: return TRPM_HARDWARE_INT; 193 case SVM_EVENT_SOFTWARE_INT: return TRPM_SOFTWARE_INT; 194 case SVM_EVENT_EXCEPTION: 195 case SVM_EVENT_NMI: return TRPM_TRAP; 196 default: 197 break; 198 } 199 AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType)); 200 return TRPM_32BIT_HACK; 201 } 202 203 178 204 #ifndef IN_RC 205 /** 206 * Converts an IEM exception event type to an SVM event type. 207 * 208 * @returns The SVM event type. 209 * @retval UINT8_MAX if the specified type of event isn't among the set 210 * of recognized IEM event types. 211 * 212 * @param uVector The vector of the event. 213 * @param fIemXcptFlags The IEM exception / interrupt flags. 214 */ 215 static uint8_t hmSvmEventTypeFromIemEvent(uint32_t uVector, uint32_t fIemXcptFlags) 216 { 217 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_CPU_XCPT) 218 return SVM_EVENT_EXCEPTION; 219 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_EXT_INT) 220 return uVector != X86_XCPT_NMI ? SVM_EVENT_EXTERNAL_IRQ : SVM_EVENT_NMI; 221 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT) 222 return SVM_EVENT_SOFTWARE_INT; 223 AssertMsgFailed(("hmSvmEventTypeFromIemEvent: Invalid IEM xcpt/int. type %#x, uVector=%#x\n", fIemXcptFlags, uVector)); 224 return UINT8_MAX; 225 } 226 227 179 228 /** 180 229 * Performs the operations necessary that are part of the vmrun instruction … … 247 296 /* Nested paging. */ 248 297 if ( pVmcbCtrl->NestedPaging.n.u1NestedPaging 249 && !pVM->cpum.ro.GuestFeatures. svm.feat.n.fNestedPaging)298 && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging) 250 299 { 251 300 Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n")); … … 255 304 /* AVIC. */ 256 305 if ( pVmcbCtrl->IntCtrl.n.u1AvicEnable 257 && !pVM->cpum.ro.GuestFeatures. svm.feat.n.fAvic)306 && !pVM->cpum.ro.GuestFeatures.fSvmAvic) 258 307 { 259 308 Log(("HMSvmVmRun: AVIC not supported -> #VMEXIT\n")); … … 263 312 /* Last branch record (LBR) virtualization. */ 264 313 if ( (pVmcbCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE) 265 && !pVM->cpum.ro.GuestFeatures. svm.feat.n.fLbrVirt)314 && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt) 266 315 { 267 316 Log(("HMSvmVmRun: LBR virtualization not supported -> #VMEXIT\n")); … … 350 399 /* EFER, CR0 and CR4. */ 351 400 uint64_t uValidEfer; 352 rc = CPUM GetValidateEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);401 rc = CPUMQueryValidatedGuestEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer); 353 402 if (RT_FAILURE(rc)) 354 403 { … … 592 641 pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo1 = uExitInfo1; 593 642 pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo2 = uExitInfo2; 643 644 /* 645 * Update the exit interrupt information field if this #VMEXIT happened as a result 646 * of delivering an event. 
647 */ 648 { 649 uint8_t uExitIntVector; 650 uint32_t uExitIntErr; 651 uint32_t fExitIntFlags; 652 bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr, 653 NULL /* uExitIntCr2 */); 654 pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u1Valid = fRaisingEvent; 655 if (fRaisingEvent) 656 { 657 pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u8Vector = uExitIntVector; 658 pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u3Type = hmSvmEventTypeFromIemEvent(uExitIntVector, fExitIntFlags); 659 if (fExitIntFlags & IEM_XCPT_FLAGS_ERR) 660 { 661 pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u1ErrorCodeValid = true; 662 pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u32ErrorCode = uExitIntErr; 663 } 664 } 665 } 594 666 595 667 /* … … 920 992 * Check if any IO accesses are being intercepted. 921 993 */ 922 if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT)) 923 { 924 Assert(CPUMIsGuestInNestedHwVirtMode(pCtx)); 925 926 /* 927 * The IOPM layout: 928 * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or 929 * two 4K pages. However, since it's possible to do a 32-bit port IO at port 930 * 65534 (thus accessing 4 bytes), we need 3 extra bits beyond the two 4K page. 931 * 932 * For IO instructions that access more than a single byte, the permission bits 933 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted. 934 */ 935 uint8_t *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap); 936 937 uint16_t const u16Port = pIoExitInfo->n.u16Port; 938 uint16_t const offIoBitmap = u16Port >> 3; 939 uint16_t const fSizeMask = pIoExitInfo->n.u1OP32 ? 0xf : pIoExitInfo->n.u1OP16 ? 3 : 1; 940 uint8_t const cShift = u16Port - (offIoBitmap << 3); 941 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift); 942 943 pbIopm += offIoBitmap; 944 uint16_t const fIopmBits = *(uint16_t *)pbIopm; 945 if (fIopmBits & fIopmMask) 946 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip); 947 } 994 Assert(CPUMIsGuestInNestedHwVirtMode(pCtx)); 995 Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT)); 996 997 /* 998 * The IOPM layout: 999 * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or 1000 * two 4K pages. 1001 * 1002 * For IO instructions that access more than a single byte, the permission bits 1003 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted. 1004 * 1005 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes), 1006 * we need 3 extra bits beyond the second 4K page. 1007 */ 1008 uint8_t const *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap); 1009 1010 uint16_t const u16Port = pIoExitInfo->n.u16Port; 1011 uint16_t const offIopm = u16Port >> 3; 1012 uint16_t const fSizeMask = pIoExitInfo->n.u1OP32 ? 0xf : pIoExitInfo->n.u1OP16 ? 3 : 1; 1013 uint8_t const cShift = u16Port - (offIopm << 3); 1014 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift); 1015 1016 pbIopm += offIopm; 1017 uint16_t const fIopmBits = *(uint16_t *)pbIopm; 1018 if (fIopmBits & fIopmMask) 1019 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip); 1020 948 1021 return VINF_HM_INTERCEPT_NOT_ACTIVE; 949 1022 } … … 955 1028 * 956 1029 * @returns Strict VBox status code. 
957 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or958 * we're not executing a nested-guest.1030 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the MSR permission bitmap does not 1031 * specify interception of the accessed MSR @a idMsr. 959 1032 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred 960 1033 * successfully. … … 973 1046 * Check if any MSRs are being intercepted. 974 1047 */ 975 if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT)) 1048 Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT)); 1049 Assert(CPUMIsGuestInNestedHwVirtMode(pCtx)); 1050 1051 uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ; 1052 1053 /* 1054 * Get the byte and bit offset of the permission bits corresponding to the MSR. 1055 */ 1056 uint16_t offMsrpm; 1057 uint32_t uMsrpmBit; 1058 int rc = hmSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit); 1059 if (RT_SUCCESS(rc)) 976 1060 { 977 Assert(CPUMIsGuestInNestedHwVirtMode(pCtx)); 978 uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ; 1061 Assert(uMsrpmBit < 0x3fff); 1062 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT); 1063 if (fWrite) 1064 ++uMsrpmBit; 979 1065 980 1066 /* 981 * Get the byte and bit offset of the permission bits corresponding to the MSR.1067 * Check if the bit is set, if so, trigger a #VMEXIT. 982 1068 */ 983 uint16_t offMsrpm; 984 uint32_t uMsrpmBit; 985 int rc = hmSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit); 986 if (RT_SUCCESS(rc)) 987 { 988 Assert(uMsrpmBit < 0x3fff); 989 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT); 990 if (fWrite) 991 ++uMsrpmBit; 992 993 /* 994 * Check if the bit is set, if so, trigger a #VMEXIT. 995 */ 996 uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap); 997 pbMsrpm += offMsrpm; 998 if (ASMBitTest(pbMsrpm, uMsrpmBit)) 999 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */); 1000 } 1001 else 1002 { 1003 /* 1004 * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it. 1005 */ 1006 Log(("HMSvmNstGstHandleIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool\n", idMsr, fWrite)); 1069 uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap); 1070 pbMsrpm += offMsrpm; 1071 if (ASMBitTest(pbMsrpm, uMsrpmBit)) 1007 1072 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */); 1008 } 1073 } 1074 else 1075 { 1076 /* 1077 * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it. 
1078 */ 1079 Log(("HMSvmNstGstHandleIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool\n", idMsr, fWrite)); 1080 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */); 1009 1081 } 1010 1082 return VINF_HM_INTERCEPT_NOT_ACTIVE; … … 1069 1141 #endif /* !IN_RC */ 1070 1142 1071 1072 /**1073 * Converts an SVM event type to a TRPM event type.1074 *1075 * @returns The TRPM event type.1076 * @retval TRPM_32BIT_HACK if the specified type of event isn't among the set1077 * of recognized trap types.1078 *1079 * @param pEvent Pointer to the SVM event.1080 */1081 VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pEvent)1082 {1083 uint8_t const uType = pEvent->n.u3Type;1084 switch (uType)1085 {1086 case SVM_EVENT_EXTERNAL_IRQ: return TRPM_HARDWARE_INT;1087 case SVM_EVENT_SOFTWARE_INT: return TRPM_SOFTWARE_INT;1088 case SVM_EVENT_EXCEPTION:1089 case SVM_EVENT_NMI: return TRPM_TRAP;1090 default:1091 break;1092 }1093 AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));1094 return TRPM_32BIT_HACK;1095 }1096 -
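The IOPM comment in the hunk above describes the lookup, but the bit arithmetic is easy to get wrong, so here is a self-contained sketch of the same test. The function and variable names are made up for illustration; the math mirrors HMSvmNstGstHandleIOIntercept, including the unaligned 16-bit read that lets a multi-byte access straddle two bitmap bytes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* One bit per byte-wide port; a wider access is intercepted if any of its bits is set. */
    static bool IsIoPortIntercepted(const uint8_t *pbIopm, uint16_t u16Port, unsigned cbAccess)
    {
        uint16_t const offIopm   = u16Port >> 3;                           /* byte holding the port's first bit */
        uint16_t const fSizeMask = cbAccess == 4 ? 0xf : cbAccess == 2 ? 0x3 : 0x1;
        uint8_t  const cShift    = u16Port - (offIopm << 3);               /* bit position within that byte */
        uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);

        uint16_t const fIopmBits = *(const uint16_t *)&pbIopm[offIopm];    /* unaligned read, as in the original */
        return (fIopmBits & fIopmMask) != 0;
    }

    int main(void)
    {
        static uint8_t abIopm[2 * 4096 + 2];    /* two 4K pages plus slack for the straddling read */
        abIopm[0x60 >> 3] |= 1 << (0x60 & 7);   /* intercept port 0x60 only */

        printf("port 0x60, 1 byte : %d\n", IsIoPortIntercepted(abIopm, 0x60, 1)); /* 1 */
        printf("port 0x5f, 2 bytes: %d\n", IsIoPortIntercepted(abIopm, 0x5f, 2)); /* 1: covers 0x5f..0x60 */
        printf("port 0x61, 1 byte : %d\n", IsIoPortIntercepted(abIopm, 0x61, 1)); /* 0 */
        return 0;
    }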
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r66457 r66581 370 370 * Check the common SVM instruction preconditions. 371 371 */ 372 # define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \372 # define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \ 373 373 do { \ 374 374 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \ … … 392 392 * Check if an SVM is enabled. 393 393 */ 394 # define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))394 # define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu))) 395 395 396 396 /** 397 397 * Check if an SVM control/instruction intercept is set. 398 398 */ 399 # define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))399 # define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept))) 400 400 401 401 /** 402 402 * Check if an SVM read CRx intercept is set. 403 403 */ 404 # define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))404 # define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr))) 405 405 406 406 /** 407 407 * Check if an SVM write CRx intercept is set. 408 408 */ 409 # define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))409 # define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr))) 410 410 411 411 /** 412 412 * Check if an SVM read DRx intercept is set. 413 413 */ 414 # define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))414 # define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr))) 415 415 416 416 /** 417 417 * Check if an SVM write DRx intercept is set. 418 418 */ 419 # define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))419 # define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr))) 420 420 421 421 /** 422 422 * Check if an SVM exception intercept is set. 423 423 */ 424 #define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt))) 425 #endif /* VBOX_WITH_NESTED_HWVIRT */ 424 # define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector))) 425 426 /** 427 * Invokes the SVM \#VMEXIT handler for the nested-guest. 428 */ 429 # define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \ 430 do \ 431 { \ 432 VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \ 433 (a_uExitInfo2)); \ 434 return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \ 435 } while (0) 436 437 /** 438 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the 439 * corresponding decode assist information. 
440 */ 441 # define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \ 442 do \ 443 { \ 444 uint64_t uExitInfo1; \ 445 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \ 446 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \ 447 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \ 448 else \ 449 uExitInfo1 = 0; \ 450 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \ 451 } while (0) 452 453 /** 454 * Checks and handles an SVM MSR intercept. 455 */ 456 # define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \ 457 HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite)) 458 459 #else 460 # define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0) 461 # define IEM_IS_SVM_ENABLED(a_pVCpu) (false) 462 # define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false) 463 # define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false) 464 # define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false) 465 # define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false) 466 # define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false) 467 # define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false) 468 # define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0) 469 # define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0) 470 # define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) (VERR_SVM_IPE_1) 471 472 #endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */ 426 473 427 474 … … 834 881 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue); 835 882 883 #if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC) 884 /** 885 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it 886 * accordingly. 887 * 888 * @returns VBox strict status code. 889 * @param pVCpu The cross context virtual CPU structure of the calling thread. 890 * @param u16Port The IO port being accessed. 891 * @param enmIoType The type of IO access. 892 * @param cbReg The IO operand size in bytes. 893 * @param cAddrSizeBits The address size bits (for 16, 32 or 64). 894 * @param iEffSeg The effective segment number. 895 * @param fRep Whether this is a repeating IO instruction (REP prefix). 896 * @param fStrIo Whether this is a string IO instruction. 897 * @param cbInstr The length of the IO instruction in bytes. 898 * 899 * @remarks This must be called only when IO instructions are intercepted by the 900 * nested-guest hypervisor. 
901 */ 902 IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg, 903 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) 904 { 905 Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT)); 906 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64); 907 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8); 908 909 static const uint32_t s_auIoOpSize[] = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 }; 910 static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 }; 911 912 SVMIOIOEXITINFO IoExitInfo; 913 IoExitInfo.u = s_auIoOpSize[cbReg & 7]; 914 IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7]; 915 IoExitInfo.n.u1STR = fStrIo; 916 IoExitInfo.n.u1REP = fRep; 917 IoExitInfo.n.u3SEG = iEffSeg & 0x7; 918 IoExitInfo.n.u1Type = enmIoType; 919 IoExitInfo.n.u16Port = u16Port; 920 921 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 922 return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr); 923 } 924 925 #else 926 IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg, 927 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) 928 { 929 RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr); 930 return VERR_IEM_IPE_9; 931 } 932 #endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */ 836 933 837 934 … … 3126 3223 IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu) 3127 3224 { 3225 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN)) 3226 { 3227 Log2(("shutdown: Guest intercept -> #VMEXIT\n")); 3228 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 3229 } 3230 3128 3231 RT_NOREF_PV(pVCpu); 3129 3232 /** @todo Probably need a separate error code and handling for this to … … 3249 3352 * @{ 3250 3353 */ 3251 3252 /** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.3253 * @{ */3254 /** CPU exception. */3255 #define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)3256 /** External interrupt (from PIC, APIC, whatever). */3257 #define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)3258 /** Software interrupt (int or into, not bound).3259 * Returns to the following instruction */3260 #define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)3261 /** Takes an error code. */3262 #define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)3263 /** Takes a CR2. */3264 #define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)3265 /** Generated by the breakpoint instruction. */3266 #define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)3267 /** Generated by a DRx instruction breakpoint and RF should be cleared. */3268 #define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)3269 /** @} */3270 3354 3271 3355 … … 5165 5249 #endif 5166 5250 5251 #if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC) 5252 if (IEM_IS_SVM_ENABLED(pVCpu)) 5253 { 5254 /* 5255 * Handle nested-guest SVM exception and software interrupt intercepts, 5256 * see AMD spec. 15.12 "Exception Intercepts". 5257 * 5258 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs. 5259 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts 5260 * even when they use a vector in the range 0 to 31. 5261 * - ICEBP should not trigger #DB intercept, but its own intercept, so we catch it early in iemOp_int1. 
5262 * - For #PF exceptions, its intercept is checked before CR2 is written by the exception. 5263 */ 5264 /* Check NMI intercept */ 5265 if ( u8Vector == X86_XCPT_NMI 5266 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI)) 5267 { 5268 Log(("iemRaiseXcptOrInt: NMI intercept -> #VMEXIT\n")); 5269 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 5270 } 5271 5272 /* Check CPU exception intercepts. */ 5273 if ( IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector) 5274 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)) 5275 { 5276 Assert(u8Vector <= 31 /* X86_XCPT_MAX */); 5277 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0; 5278 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0; 5279 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist 5280 && u8Vector == X86_XCPT_PF 5281 && !(uErr & X86_TRAP_PF_ID)) 5282 { 5283 /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */ 5284 #ifdef IEM_WITH_CODE_TLB 5285 #else 5286 uint8_t const offOpCode = pVCpu->iem.s.offOpcode; 5287 uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; 5288 if ( cbCurrent > 0 5289 && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr)) 5290 { 5291 Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode)); 5292 memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent); 5293 } 5294 #endif 5295 } 5296 Log(("iemRaiseXcptOrInt: Xcpt intercept (u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n", u8Vector, 5297 uExitInfo1, uExitInfo2)); 5298 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2); 5299 } 5300 5301 /* Check software interrupt (INTn) intercepts. */ 5302 if ( IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN) 5303 && (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)) 5304 { 5305 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0; 5306 Log(("iemRaiseXcptOrInt: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector)); 5307 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */); 5308 } 5309 } 5310 #endif 5311 5167 5312 /* 5168 5313 * Do recursion accounting. … … 5179 5324 5180 5325 /** @todo double and tripple faults. */ 5326 /** @todo When implementing #DF, the SVM nested-guest #DF intercepts needs some 5327 * care. See AMD spec. 15.12 "Exception Intercepts". */ 5181 5328 if (pVCpu->iem.s.cXcptRecursions >= 3) 5182 5329 { … … 5194 5341 } 5195 5342 pVCpu->iem.s.cXcptRecursions++; 5196 pVCpu->iem.s.uCurXcpt = u8Vector; 5197 pVCpu->iem.s.fCurXcpt = fFlags; 5343 pVCpu->iem.s.uCurXcpt = u8Vector; 5344 pVCpu->iem.s.fCurXcpt = fFlags; 5345 pVCpu->iem.s.uCurXcptErr = uErr; 5346 pVCpu->iem.s.uCurXcptCr2 = uCr2; 5198 5347 5199 5348 /* … … 9669 9818 iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) 9670 9819 { 9820 VBOXSTRICTRC rcStrict; 9821 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS)) 9822 { 9823 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n")); 9824 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 9825 } 9826 9671 9827 /* 9672 9828 * The SIDT and SGDT instructions actually stores the data using two 9673 9829 * independent writes. The instructions does not respond to opsize prefixes. 
9674 9830 */ 9675 VBOXSTRICTRCrcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);9831 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit); 9676 9832 if (rcStrict == VINF_SUCCESS) 9677 9833 { … … 11756 11912 } while (0) 11757 11913 11758 #if 011759 #ifdef VBOX_WITH_NESTED_HWVIRT11760 /** The instruction raises an \#UD when SVM is not enabled. */11761 #define IEMOP_HLP_NEEDS_SVM_ENABLED() \11762 do \11763 { \11764 if (IEM_IS_SVM_ENABLED(pVCpu)) \11765 return IEMOP_RAISE_INVALID_OPCODE(); \11766 } while (0)11767 #endif11768 #endif11769 11770 11914 /** The instruction is not available in 64-bit mode, throw \#UD if we're in 11771 11915 * 64-bit mode. */ … … 11910 12054 return IEMOP_RAISE_INVALID_OPCODE(); \ 11911 12055 } while (0) 12056 12057 #if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(IN_RC) 12058 /** Check and handles SVM nested-guest control & instruction intercept. */ 12059 # define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \ 12060 do \ 12061 { \ 12062 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \ 12063 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \ 12064 } while (0) 12065 12066 /** Check and handle SVM nested-guest CR0 read intercept. */ 12067 # define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \ 12068 do \ 12069 { \ 12070 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \ 12071 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \ 12072 } while (0) 12073 12074 #else 12075 # define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \ 12076 do { RT_NOREF5(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2); } while (0) 12077 12078 # define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \ 12079 do { RT_NOREF4(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2); } while (0) 12080 12081 #endif /* VBOX_WITH_NESTED_HWVIRT && !IN_RC */ 12082 11912 12083 11913 12084 /** … … 15040 15211 15041 15212 15042 #ifdef VBOX_WITH_NESTED_HWVIRT15043 15213 /** 15044 15214 * Checks if IEM is in the process of delivering an event (interrupt or 15045 15215 * exception). 15046 15216 * 15047 * @returns true if it's raising an interrupt or exception, false otherwise. 15048 * @param pVCpu The cross context virtual CPU structure. 15049 */ 15050 VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu) 15051 { 15052 return pVCpu->iem.s.cXcptRecursions > 0; 15053 } 15054 15055 15217 * @returns true if we're in the process of raising an interrupt or exception, 15218 * false otherwise. 15219 * @param pVCpu The cross context virtual CPU structure. 15220 * @param puVector Where to store the vector associated with the 15221 * currently delivered event, optional. 15222 * @param pfFlags Where to store th event delivery flags (see 15223 * IEM_XCPT_FLAGS_XXX), optional. 15224 * @param puErr Where to store the error code associated with the 15225 * event, optional. 15226 * @param puCr2 Where to store the CR2 associated with the event, 15227 * optional. 
15228 */ 15229 VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2) 15230 { 15231 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0; 15232 if (fRaisingXcpt) 15233 { 15234 if (puVector) 15235 *puVector = pVCpu->iem.s.uCurXcpt; 15236 if (pfFlags) 15237 *pfFlags = pVCpu->iem.s.fCurXcpt; 15238 /* The caller should check the flags to determine if the error code & CR2 are valid for the event. */ 15239 if (puErr) 15240 *puErr = pVCpu->iem.s.uCurXcptErr; 15241 if (puCr2) 15242 *puCr2 = pVCpu->iem.s.uCurXcptCr2; 15243 } 15244 return fRaisingXcpt; 15245 } 15246 15247 15248 #ifdef VBOX_WITH_NESTED_HWVIRT 15056 15249 /** 15057 15250 * Interface for HM and EM to emulate the STGI instruction. -
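The MSR counterpart of the IO bitmap check (used by the new IEM_SVM_NST_GST_MSR_INTERCEPT macro above and by HMSvmNstGstHandleMsrIntercept in HMSVMAll.cpp) relies on hmSvmGetMsrpmOffsetAndBit, whose body is not part of this changeset. For orientation, a self-contained sketch of the lookup it performs, based on the MSR permission map layout in the AMD manual (three 2-Kbyte vectors, two bits per MSR, read bit first and write bit second, which is why the HMSVMAll.cpp hunk does ++uMsrpmBit for writes); the function name is made up for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns whether a read (fWrite=false) or write (fWrite=true) of idMsr is
       intercepted according to an 8 KB SVM MSR permission bitmap at pbMsrpm. */
    static bool IsMsrIntercepted(const uint8_t *pbMsrpm, uint32_t idMsr, bool fWrite)
    {
        uint32_t uMsrBase, offVector;
        if (idMsr <= UINT32_C(0x00001fff))
        {
            uMsrBase  = UINT32_C(0x00000000);
            offVector = 0x0000;                 /* MSRs 0000_0000h..0000_1fffh */
        }
        else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        {
            uMsrBase  = UINT32_C(0xc0000000);
            offVector = 0x0800;                 /* MSRs c000_0000h..c000_1fffh */
        }
        else if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
        {
            uMsrBase  = UINT32_C(0xc0010000);
            offVector = 0x1000;                 /* MSRs c001_0000h..c001_1fffh */
        }
        else
            return true;    /* out-of-range MSRs are treated as intercepted, as in the hunk above */

        uint32_t const uBit = ((idMsr - uMsrBase) << 1) + (fWrite ? 1 : 0); /* two bits per MSR */
        return (pbMsrpm[offVector + (uBit >> 3)] >> (uBit & 7)) & 1;
    }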
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r66462 r66581 558 558 VBOXSTRICTRC rcStrict; 559 559 560 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF)) 561 { 562 Log2(("pushf: Guest intercept -> #VMEXIT\n")); 563 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 564 } 565 560 566 /* 561 567 * If we're in V8086 mode some care is required (which is why we're in … … 618 624 VBOXSTRICTRC rcStrict; 619 625 uint32_t fEflNew; 626 627 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF)) 628 { 629 Log2(("popf: Guest intercept -> #VMEXIT\n")); 630 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 631 } 620 632 621 633 /* … … 3856 3868 3857 3869 /* 3870 * The SVM nested-guest intercept for iret takes priority over all exceptions, 3871 * see AMD spec. "15.9 Instruction Intercepts". 3872 */ 3873 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET)) 3874 { 3875 Log(("iret: Guest intercept -> #VMEXIT\n")); 3876 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 3877 } 3878 3879 /* 3858 3880 * Call a mode specific worker. 3859 3881 */ … … 4632 4654 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM); 4633 4655 4656 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES)) 4657 { 4658 Log(("lgdt: Guest intercept -> #VMEXIT\n")); 4659 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 4660 } 4661 4634 4662 /* 4635 4663 * Fetch the limit and base address. … … 4698 4726 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM); 4699 4727 4728 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES)) 4729 { 4730 Log(("lidt: Guest intercept -> #VMEXIT\n")); 4731 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 4732 } 4733 4700 4734 /* 4701 4735 * Fetch the limit and base address. … … 4783 4817 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL)) 4784 4818 { 4819 /* Nested-guest SVM intercept. */ 4820 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES)) 4821 { 4822 Log(("lldt: Guest intercept -> #VMEXIT\n")); 4823 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 4824 } 4825 4785 4826 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt)); 4786 4827 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) … … 4855 4896 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt)); 4856 4897 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt); 4898 } 4899 4900 /* Nested-guest SVM intercept. 
*/ 4901 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES)) 4902 { 4903 Log(("lldt: Guest intercept -> #VMEXIT\n")); 4904 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 4857 4905 } 4858 4906 … … 4908 4956 return iemRaiseGeneralProtectionFault0(pVCpu); 4909 4957 } 4958 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES)) 4959 { 4960 Log(("ltr: Guest intercept -> #VMEXIT\n")); 4961 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 4962 } 4910 4963 4911 4964 /* … … 5010 5063 Assert(!pCtx->eflags.Bits.u1VM); 5011 5064 5065 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(pVCpu, iCrReg)) 5066 { 5067 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg)); 5068 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg); 5069 } 5070 5012 5071 /* read it */ 5013 5072 uint64_t crX; … … 5051 5110 * @param iCrReg The CRx register to write (valid). 5052 5111 * @param uNewCrX The new value. 5053 */ 5054 IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX) 5112 * @param enmAccessCrx The instruction that caused the CrX load. 5113 * @param iGReg The general register in case of a 'mov CRx,GReg' 5114 * instruction. 5115 */ 5116 IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg) 5055 5117 { 5056 5118 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 5057 5119 VBOXSTRICTRC rcStrict; 5058 5120 int rc; 5121 #ifndef VBOX_WITH_NESTED_HWVIRT 5122 RT_NOREF2(iGReg, enmAccessCrX); 5123 #endif 5059 5124 5060 5125 /* … … 5128 5193 5129 5194 /* 5195 * SVM nested-guest CR0 write intercepts. 5196 */ 5197 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg)) 5198 { 5199 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg)); 5200 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg); 5201 } 5202 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITES)) 5203 { 5204 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */ 5205 if ( enmAccessCrX == IEMACCESSCRX_LMSW 5206 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP))) 5207 { 5208 Assert(enmAccessCrX != IEMACCESSCRX_CLTS); 5209 Log(("iemCImpl_load_Cr%#x: TS/MP bit changed or lmsw instr: Guest intercept -> #VMEXIT\n", iCrReg)); 5210 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 5211 } 5212 } 5213 5214 /* 5130 5215 * Change CR0. 
5131 5216 */ … … 5186 5271 */ 5187 5272 case 2: 5273 { 5274 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2)) 5275 { 5276 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg)); 5277 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg); 5278 } 5188 5279 pCtx->cr2 = uNewCrX; 5189 5280 rcStrict = VINF_SUCCESS; 5190 5281 break; 5282 } 5191 5283 5192 5284 /* … … 5219 5311 uNewCrX, uNewCrX & ~fValid)); 5220 5312 uNewCrX &= fValid; 5313 } 5314 5315 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3)) 5316 { 5317 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg)); 5318 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg); 5221 5319 } 5222 5320 … … 5284 5382 } 5285 5383 5384 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4)) 5385 { 5386 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg)); 5387 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg); 5388 } 5286 5389 5287 5390 /* … … 5337 5440 } 5338 5441 5442 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8)) 5443 { 5444 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg)); 5445 IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg); 5446 } 5447 5339 5448 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5340 5449 APICSetTpr(pVCpu, (uint8_t)uNewCrX << 4); … … 5379 5488 else 5380 5489 uNewCrX = iemGRegFetchU32(pVCpu, iGReg); 5381 return IEM_CIMPL_CALL_ 2(iemCImpl_load_CrX, iCrReg, uNewCrX);5490 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg); 5382 5491 } 5383 5492 … … 5401 5510 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS); 5402 5511 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS); 5403 return IEM_CIMPL_CALL_ 2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);5512 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */); 5404 5513 } 5405 5514 … … 5416 5525 uint64_t uNewCr0 = pCtx->cr0; 5417 5526 uNewCr0 &= ~X86_CR0_TS; 5418 return IEM_CIMPL_CALL_ 2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);5527 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */); 5419 5528 } 5420 5529 … … 5478 5587 } 5479 5588 5589 /** @todo SVM nested-guest intercept for DR8-DR15? */ 5590 /* 5591 * Check for any SVM nested-guest intercepts for the DRx read. 5592 */ 5593 if (IEM_IS_SVM_READ_DR_INTERCEPT_SET(pVCpu, iDrReg)) 5594 { 5595 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg)); 5596 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf), 5597 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */); 5598 } 5599 5480 5600 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5481 5601 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX; … … 5568 5688 } 5569 5689 5690 /** @todo SVM nested-guest intercept for DR8-DR15? */ 5691 /* 5692 * Check for any SVM nested-guest intercepts for the DRx write. 5693 */ 5694 if (IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg)) 5695 { 5696 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg)); 5697 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf), 5698 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */); 5699 } 5700 5570 5701 /* 5571 5702 * Do the actual setting. 
… … 5597 5728 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM); 5598 5729 5730 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG)) 5731 { 5732 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage)); 5733 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPG, 5734 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? GCPtrPage : 0, 0 /* uExitInfo2 */); 5735 } 5736 5599 5737 int rc = PGMInvalidatePage(pVCpu, GCPtrPage); 5600 5738 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 5629 5767 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl)); 5630 5768 return iemRaiseGeneralProtectionFault0(pVCpu); 5769 } 5770 5771 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC)) 5772 { 5773 Log(("rdtsc: Guest intercept -> #VMEXIT\n")); 5774 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 5631 5775 } 5632 5776 … … 5647 5791 5648 5792 /** 5793 * Implements RDTSC. 5794 */ 5795 IEM_CIMPL_DEF_0(iemCImpl_rdtscp) 5796 { 5797 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 5798 5799 /* 5800 * Check preconditions. 5801 */ 5802 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP) 5803 return iemRaiseUndefinedOpcode(pVCpu); 5804 5805 if ( (pCtx->cr4 & X86_CR4_TSD) 5806 && pVCpu->iem.s.uCpl != 0) 5807 { 5808 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl)); 5809 return iemRaiseGeneralProtectionFault0(pVCpu); 5810 } 5811 5812 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP)) 5813 { 5814 Log(("rdtscp: Guest intercept -> #VMEXIT\n")); 5815 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 5816 } 5817 5818 /* 5819 * Do the job. 5820 * Query the MSR first in case of trips to ring-3. 5821 */ 5822 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx); 5823 if (rcStrict == VINF_SUCCESS) 5824 { 5825 /* Low dword of the TSC_AUX msr only. */ 5826 pCtx->rcx &= UINT32_C(0xffffffff); 5827 5828 uint64_t uTicks = TMCpuTickGet(pVCpu); 5829 pCtx->rax = (uint32_t)uTicks; 5830 pCtx->rdx = uTicks >> 32; 5831 #ifdef IEM_VERIFICATION_MODE_FULL 5832 pVCpu->iem.s.fIgnoreRaxRdx = true; 5833 #endif 5834 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5835 } 5836 return rcStrict; 5837 } 5838 5839 5840 /** 5841 * Implements RDPMC. 5842 */ 5843 IEM_CIMPL_DEF_0(iemCImpl_rdpmc) 5844 { 5845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 5846 if ( pVCpu->iem.s.uCpl != 0 5847 && !(pCtx->cr4 & X86_CR4_PCE)) 5848 return iemRaiseGeneralProtectionFault0(pVCpu); 5849 5850 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC)) 5851 { 5852 Log(("rdpmc: Guest intercept -> #VMEXIT\n")); 5853 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 5854 } 5855 5856 /** @todo Implement RDPMC for the regular guest execution case (the above only 5857 * handles nested-guest intercepts). */ 5858 RT_NOREF(cbInstr); 5859 return VERR_IEM_INSTR_NOT_IMPLEMENTED; 5860 } 5861 5862 5863 /** 5649 5864 * Implements RDMSR. 5650 5865 */ … … 5665 5880 */ 5666 5881 RTUINT64U uValue; 5667 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u); 5882 VBOXSTRICTRC rcStrict; 5883 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT)) 5884 { 5885 rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, false /* fWrite */); 5886 if (rcStrict == VINF_SVM_VMEXIT) 5887 return VINF_SUCCESS; 5888 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE) 5889 { 5890 Log(("IEM: SVM intercepted rdmsr(%#x) failed. 
rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict))); 5891 return rcStrict; 5892 } 5893 } 5894 5895 rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u); 5668 5896 if (rcStrict == VINF_SUCCESS) 5669 5897 { … … 5718 5946 5719 5947 VBOXSTRICTRC rcStrict; 5948 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT)) 5949 { 5950 rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, true /* fWrite */); 5951 if (rcStrict == VINF_SVM_VMEXIT) 5952 return VINF_SUCCESS; 5953 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE) 5954 { 5955 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pCtx->ecx, VBOXSTRICTRC_VAL(rcStrict))); 5956 return rcStrict; 5957 } 5958 } 5959 5720 5960 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 5721 5961 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u); … … 5776 6016 5777 6017 /* 6018 * Check SVM nested-guest IO intercept. 6019 */ 6020 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT)) 6021 { 6022 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, 0 /* N/A - cAddrSizeBits */, 6023 0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr); 6024 if (rcStrict == VINF_SVM_VMEXIT) 6025 return VINF_SUCCESS; 6026 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE) 6027 { 6028 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg, 6029 VBOXSTRICTRC_VAL(rcStrict))); 6030 return rcStrict; 6031 } 6032 } 6033 6034 /* 5778 6035 * Perform the I/O. 5779 6036 */ … … 5846 6103 5847 6104 /* 6105 * Check SVM nested-guest IO intercept. 6106 */ 6107 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT)) 6108 { 6109 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, 0 /* N/A - cAddrSizeBits */, 6110 0 /* N/A - iEffSeg */, false /* fRep */, false /* fStrIo */, cbInstr); 6111 if (rcStrict == VINF_SVM_VMEXIT) 6112 return VINF_SUCCESS; 6113 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE) 6114 { 6115 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg, 6116 VBOXSTRICTRC_VAL(rcStrict))); 6117 return rcStrict; 6118 } 6119 } 6120 6121 /* 5848 6122 * Perform the I/O. 
5849 6123 */ … … 5914 6188 } 5915 6189 5916 #ifndef IN_RC5917 6190 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN)) 5918 6191 { 5919 6192 Log(("vmrun: Guest intercept -> #VMEXIT\n")); 5920 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 5921 } 5922 #endif 6193 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6194 } 5923 6195 5924 6196 VBOXSTRICTRC rcStrict = HMSvmVmrun(pVCpu, pCtx, GCPhysVmcb); … … 5941 6213 { 5942 6214 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 5943 #ifndef IN_RC5944 6215 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL)) 5945 6216 { 5946 Log(("vmrun: Guest intercept -> #VMEXIT\n")); 5947 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 5948 } 5949 #endif 6217 Log(("vmmcall: Guest intercept -> #VMEXIT\n")); 6218 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6219 } 5950 6220 5951 6221 bool fUpdatedRipAndRF; … … 5969 6239 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 5970 6240 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload); 5971 #ifndef IN_RC5972 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))5973 {5974 Log(("vmload: Guest intercept -> #VMEXIT\n"));5975 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);5976 }5977 #endif5978 6241 5979 6242 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax; … … 5983 6246 Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb)); 5984 6247 return iemRaiseGeneralProtectionFault0(pVCpu); 6248 } 6249 6250 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD)) 6251 { 6252 Log(("vmload: Guest intercept -> #VMEXIT\n")); 6253 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 5985 6254 } 5986 6255 … … 6020 6289 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6021 6290 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave); 6022 #ifndef IN_RC6023 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))6024 {6025 Log(("vmsave: Guest intercept -> #VMEXIT\n"));6026 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);6027 }6028 #endif6029 6291 6030 6292 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? 
pCtx->rax : pCtx->eax; … … 6034 6296 Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb)); 6035 6297 return iemRaiseGeneralProtectionFault0(pVCpu); 6298 } 6299 6300 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE)) 6301 { 6302 Log(("vmsave: Guest intercept -> #VMEXIT\n")); 6303 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6036 6304 } 6037 6305 … … 6071 6339 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6072 6340 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi); 6073 #ifndef IN_RC6074 6341 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI)) 6075 6342 { 6076 6343 Log(("clgi: Guest intercept -> #VMEXIT\n")); 6077 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6078 } 6079 #endif 6344 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6345 } 6080 6346 6081 6347 pCtx->hwvirt.svm.fGif = 0; … … 6092 6358 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6093 6359 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi); 6094 #ifndef IN_RC6095 6360 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI)) 6096 6361 { 6097 6362 Log2(("stgi: Guest intercept -> #VMEXIT\n")); 6098 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6099 } 6100 #endif 6363 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6364 } 6101 6365 6102 6366 pCtx->hwvirt.svm.fGif = 1; … … 6112 6376 { 6113 6377 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6114 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);6115 #ifndef IN_RC6116 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))6117 {6118 Log2(("invlpga: Guest intercept -> #VMEXIT\n"));6119 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);6120 }6121 #endif6122 6123 6378 RTGCPTR const GCPtrPage = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax; 6124 6379 /** @todo PGM needs virtual ASID support. */ … … 6126 6381 uint32_t const uAsid = pCtx->ecx; 6127 6382 #endif 6383 6384 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga); 6385 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA)) 6386 { 6387 Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage)); 6388 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6389 } 6390 6128 6391 PGMInvalidatePage(pVCpu, GCPtrPage); 6129 6392 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6130 6393 return VINF_SUCCESS; 6394 } 6395 6396 6397 /** 6398 * Implements 'SKINIT'. 
6399 */ 6400 IEM_CIMPL_DEF_0(iemCImpl_skinit) 6401 { 6402 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga); 6403 6404 uint32_t uIgnore; 6405 uint32_t fFeaturesECX; 6406 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore); 6407 if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT)) 6408 return iemRaiseUndefinedOpcode(pVCpu); 6409 6410 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT)) 6411 { 6412 Log2(("skinit: Guest intercept -> #VMEXIT\n")); 6413 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6414 } 6415 6416 RT_NOREF(cbInstr); 6417 return VERR_IEM_INSTR_NOT_IMPLEMENTED; 6131 6418 } 6132 6419 #endif /* VBOX_WITH_NESTED_HWVIRT */ … … 6228 6515 if (pVCpu->iem.s.uCpl != 0) 6229 6516 return iemRaiseGeneralProtectionFault0(pVCpu); 6517 6518 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT)) 6519 { 6520 Log2(("hlt: Guest intercept -> #VMEXIT\n")); 6521 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6522 } 6523 6230 6524 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6231 6525 return VINF_EM_HALT; … … 6276 6570 return rcStrict; 6277 6571 6572 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR)) 6573 { 6574 Log2(("monitor: Guest intercept -> #VMEXIT\n")); 6575 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6576 } 6577 6278 6578 /* 6279 6579 * Call EM to prepare the monitor/wait. … … 6334 6634 6335 6635 /* 6636 * Check SVM nested-guest mwait intercepts. 6637 */ 6638 if ( IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED) 6639 && EMMonitorIsArmed(pVCpu)) 6640 { 6641 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n")); 6642 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6643 } 6644 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT)) 6645 { 6646 Log2(("mwait: Guest intercept -> #VMEXIT\n")); 6647 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6648 } 6649 6650 /* 6336 6651 * Call EM to prepare the monitor/wait. 6337 6652 */ … … 6378 6693 { 6379 6694 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6695 6696 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID)) 6697 { 6698 Log2(("cpuid: Guest intercept -> #VMEXIT\n")); 6699 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 6700 } 6380 6701 6381 6702 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx); … … 6726 7047 if (pCtx->cr4 & X86_CR4_OSXSAVE) 6727 7048 { 7049 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV)) 7050 { 7051 Log2(("xsetbv: Guest intercept -> #VMEXIT\n")); 7052 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 7053 } 7054 6728 7055 if (pVCpu->iem.s.uCpl == 0) 6729 7056 { -
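All of the new intercept call sites in this file (IO ports via iemSvmHandleIOIntercept, MSRs via IEM_SVM_NST_GST_MSR_INTERCEPT) share the same three-way return convention. A condensed, annotated sketch of the pattern, taken from the iemCImpl_in hunk above:

    VBOXSTRICTRC rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg,
                                                    0 /* N/A - cAddrSizeBits */, 0 /* N/A - iEffSeg */,
                                                    false /* fRep */, false /* fStrIo */, cbInstr);
    if (rcStrict == VINF_SVM_VMEXIT)
        return VINF_SUCCESS;     /* the #VMEXIT to the nested-guest hypervisor has been performed; stop here */
    if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
        return rcStrict;         /* checking or performing the #VMEXIT failed; propagate the status */
    /* VINF_HM_INTERCEPT_NOT_ACTIVE: not intercepted, continue with normal emulation of the instruction */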
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
r62478 r66581 1218 1218 } 1219 1219 1220 /* 1221 * Check SVM nested-guest IO intercept. 1222 */ 1223 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT)) 1224 { 1225 rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, false /* fRep */, 1226 true /* fStrIo */, cbInstr); 1227 if (rcStrict == VINF_SVM_VMEXIT) 1228 return VINF_SUCCESS; 1229 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE) 1230 { 1231 Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pCtx->dx, OP_SIZE / 8, 1232 VBOXSTRICTRC_VAL(rcStrict))); 1233 return rcStrict; 1234 } 1235 } 1236 1220 1237 OP_TYPE *puMem; 1221 1238 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W); … … 1269 1286 if (rcStrict != VINF_SUCCESS) 1270 1287 return rcStrict; 1288 } 1289 1290 /* 1291 * Check SVM nested-guest IO intercept. 1292 */ 1293 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT)) 1294 { 1295 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, true /* fRep */, 1296 true /* fStrIo */, cbInstr); 1297 if (rcStrict == VINF_SVM_VMEXIT) 1298 return VINF_SUCCESS; 1299 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE) 1300 { 1301 Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8, 1302 VBOXSTRICTRC_VAL(rcStrict))); 1303 return rcStrict; 1304 } 1271 1305 } 1272 1306 … … 1455 1489 } 1456 1490 1491 /* 1492 * Check SVM nested-guest IO intercept. 1493 */ 1494 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT)) 1495 { 1496 rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, false /* fRep */, 1497 true /* fStrIo */, cbInstr); 1498 if (rcStrict == VINF_SVM_VMEXIT) 1499 return VINF_SUCCESS; 1500 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE) 1501 { 1502 Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pCtx->dx, OP_SIZE / 8, 1503 VBOXSTRICTRC_VAL(rcStrict))); 1504 return rcStrict; 1505 } 1506 } 1507 1457 1508 OP_TYPE uValue; 1458 1509 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pCtx->ADDR_rSI); … … 1496 1547 if (rcStrict != VINF_SUCCESS) 1497 1548 return rcStrict; 1549 } 1550 1551 /* 1552 * Check SVM nested-guest IO intercept. 1553 */ 1554 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT)) 1555 { 1556 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, true /* fRep */, 1557 true /* fStrIo */, cbInstr); 1558 if (rcStrict == VINF_SVM_VMEXIT) 1559 return VINF_SUCCESS; 1560 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE) 1561 { 1562 Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8, 1563 VBOXSTRICTRC_VAL(rcStrict))); 1564 return rcStrict; 1565 } 1498 1566 } 1499 1567 -
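The string variants differ from the plain IN/OUT call sites only in the flags they pass: fStrIo is always true here and fRep is true for the REP-prefixed forms. In terms of the IOIO exit information built by iemSvmHandleIOIntercept (see IEMAll.cpp above), that amounts to the following field assignments; this is a sketch, with the operand- and address-size bits taken from the lookup tables omitted:

    SVMIOIOEXITINFO IoExitInfo;
    IoExitInfo.u         = 0;                /* size/address-size bits omitted in this sketch */
    IoExitInfo.n.u1STR   = true;             /* INS/OUTS: string instruction */
    IoExitInfo.n.u1REP   = fRep;             /* only set for the REP-prefixed forms */
    IoExitInfo.n.u3SEG   = iEffSeg & 0x7;    /* X86_SREG_ES for INS, the effective segment for OUTS */
    IoExitInfo.n.u1Type  = enmIoType;        /* SVMIOIOTYPE_IN or SVMIOIOTYPE_OUT */
    IoExitInfo.n.u16Port = u16Port;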
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
(r66479 -> r66581)

@@ -4454,5 +4454,13 @@
 
     if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)
+    {
         IEMOP_MNEMONIC(pause, "pause");
+#ifdef VBOX_WITH_NESTED_HWVIRT
+        /** @todo Pause filter count and threshold with SVM nested hardware virt. */
+        Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter);
+        Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold);
+#endif
+        IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0);
+    }
     else
         IEMOP_MNEMONIC(nop, "nop");

@@ -10582,4 +10590,5 @@
     IEMOP_HLP_MIN_386(); /** @todo does not generate #UD on 286, or so they say... */
     /** @todo testcase! */
+    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_ICEBP, SVM_EXIT_ICEBP, 0, 0);
     return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_DB, false /*fIsBpInstr*/);
 }
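For the PAUSE case above, the decode-time helper is just shorthand; expanding IEMOP_HLP_SVM_CTRL_INTERCEPT and IEM_RETURN_SVM_NST_GST_VMEXIT with the definitions added in IEMAll.cpp gives roughly:

    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
    {
        VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit(pVCpu, IEM_GET_CTX(pVCpu), SVM_EXIT_PAUSE,
                                                        0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit;
    }
    /* not intercepted: fall through to the normal PAUSE/NOP handling */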
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
r66474 r66581 35 35 { 36 36 IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 37 IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0); 37 38 switch (pVCpu->iem.s.enmEffOpSize) 38 39 { … … 74 75 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 75 76 IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 77 IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0); 76 78 IEM_MC_FETCH_LDTR_U16(u16Ldtr); 77 79 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Ldtr); … … 93 95 { 94 96 IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 97 IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0); 95 98 switch (pVCpu->iem.s.enmEffOpSize) 96 99 { … … 132 135 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 133 136 IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 137 IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0); 134 138 IEM_MC_FETCH_TR_U16(u16Tr); 135 139 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Tr); … … 482 486 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga); 483 487 } 488 489 490 /** Opcode 0x0f 0x01 0xde. */ 491 FNIEMOP_DEF(iemOp_Grp7_Amd_skinit) 492 { 493 IEMOP_MNEMONIC(skinit, "skinit"); 494 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_skinit); 495 } 484 496 #else 485 497 /** Opcode 0x0f 0x01 0xd8. */ … … 503 515 /** Opcode 0x0f 0x01 0xdf. */ 504 516 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga); 505 #endif /* VBOX_WITH_NESTED_HWVIRT */506 517 507 518 /** Opcode 0x0f 0x01 0xde. */ 508 519 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_skinit); 520 #endif /* VBOX_WITH_NESTED_HWVIRT */ 509 521 510 522 /** Opcode 0x0f 0x01 /4. */ … … 516 528 { 517 529 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 530 IEMOP_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 518 531 switch (pVCpu->iem.s.enmEffOpSize) 519 532 { … … 562 575 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 563 576 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 577 IEMOP_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 564 578 IEM_MC_FETCH_CR0_U16(u16Tmp); 565 579 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386) … … 636 650 FNIEMOP_DEF(iemOp_Grp7_rdtscp) 637 651 { 638 NOREF(pVCpu); 639 IEMOP_BITCH_ABOUT_STUB(); 640 return VERR_IEM_INSTR_NOT_IMPLEMENTED; 652 IEMOP_MNEMONIC(rdtscp, "rdtscp"); 653 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 654 /** @todo SVM intercept removal from here. */ 655 IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP, SVM_EXIT_RDTSCP, 0, 0); 656 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtscp); 641 657 } 642 658 … … 868 884 869 885 /** Opcode 0x0f 0x08. */ 870 FNIEMOP_STUB(iemOp_invd); 886 FNIEMOP_DEF(iemOp_invd) 887 { 888 IEMOP_MNEMONIC(invd, "invd"); 889 #ifdef VBOX_WITH_NESTED_HWVIRT 890 IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); 891 IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0); 892 #endif 893 /** @todo implement invd for the regular case (above only handles nested SVM 894 * exits). 
*/ 895 IEMOP_BITCH_ABOUT_STUB(); 896 return VERR_IEM_INSTR_NOT_IMPLEMENTED; 897 } 898 871 899 // IEMOP_HLP_MIN_486(); 872 900 … … 880 908 IEM_MC_BEGIN(0, 0); 881 909 IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); 910 IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0); 882 911 IEM_MC_ADVANCE_RIP(); 883 912 IEM_MC_END(); … … 2031 2060 2032 2061 /** Opcode 0x0f 0x34. */ 2033 FNIEMOP_STUB(iemOp_rdpmc); 2062 FNIEMOP_DEF(iemOp_rdpmc) 2063 { 2064 IEMOP_MNEMONIC(rdpmc, "rdpmc"); 2065 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2066 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdpmc); 2067 } 2068 2069 2034 2070 /** Opcode 0x0f 0x34. */ 2035 2071 FNIEMOP_STUB(iemOp_sysenter); … … 5722 5758 5723 5759 /** Opcode 0x0f 0xaa. */ 5724 FNIEMOP_STUB(iemOp_rsm); 5760 FNIEMOP_DEF(iemOp_rsm) 5761 { 5762 IEMOP_MNEMONIC(rsm, "rsm"); 5763 IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0); 5764 /** @todo rsm - for the regular case (above handles only the SVM nested-guest 5765 * intercept). */ 5766 IEMOP_BITCH_ABOUT_STUB(); 5767 return IEMOP_RAISE_INVALID_OPCODE(); 5768 } 5769 5725 5770 //IEMOP_HLP_MIN_386(); 5726 5771
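The rdtscp opcode handler above used to be a stub; it now defers to the new iemCImpl_rdtscp added in IEMAllCImpl.cpp.h. Its success path, condensed here for reference, produces the architectural RDTSCP results (EDX:EAX = TSC, ECX = low dword of TSC_AUX):

    VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
    if (rcStrict == VINF_SUCCESS)
    {
        pCtx->rcx &= UINT32_C(0xffffffff);   /* only the low dword of TSC_AUX is returned */
        uint64_t const uTicks = TMCpuTickGet(pVCpu);
        pCtx->rax = (uint32_t)uTicks;        /* TSC bits 31:0  */
        pCtx->rdx = uTicks >> 32;            /* TSC bits 63:32 */
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    }
    return rcStrict;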