Changeset 67528 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp:
    Jun 21, 2017 8:26:45 AM (7 years ago)
Location:
    trunk/src/VBox/VMM/VMMAll
Files:
    3 edited
Legend:
    Unmodified (context lines, shown without a prefix)
    Added (lines prefixed with +)
    Removed (lines prefixed with -)
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r67163 → r67528

@@ -103 +103 @@
 #include <VBox/vmm/hm.h>
 #ifdef VBOX_WITH_NESTED_HWVIRT
+# include <VBox/vmm/em.h>
 # include <VBox/vmm/hm_svm.h>
 #endif
@@ -447 +448 @@
  * Invokes the SVM \#VMEXIT handler for the nested-guest.
  */
-# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
+# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     do \
     { \
-        VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
-                                                        (a_uExitInfo2)); \
-        return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
+        return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
     } while (0)
@@ -459 +458 @@
  * corresponding decode assist information.
  */
-# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
+# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
     do \
     { \
@@ -468 +467 @@
         else \
             uExitInfo1 = 0; \
-        IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
+        IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
     } while (0)
-
-/**
- * Checks and handles an SVM MSR intercept.
- */
-# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
-    HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))

 #else
@@ -486 +479 @@
 # define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)    (false)
 # define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)    (false)
-# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
-# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
-# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) (VERR_SVM_IPE_1)
+# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)

 #endif /* VBOX_WITH_NESTED_HWVIRT */
@@ -902 +894 @@

 #ifdef VBOX_WITH_NESTED_HWVIRT
-/**
- * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
- * accordingly.
- *
- * @returns VBox strict status code.
- * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
- * @param   u16Port         The IO port being accessed.
- * @param   enmIoType       The type of IO access.
- * @param   cbReg           The IO operand size in bytes.
- * @param   cAddrSizeBits   The address size bits (for 16, 32 or 64).
- * @param   iEffSeg         The effective segment number.
- * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
- * @param   fStrIo          Whether this is a string IO instruction.
- * @param   cbInstr         The length of the IO instruction in bytes.
- *
- * @remarks This must be called only when IO instructions are intercepted by the
- *          nested-guest hypervisor.
- */
-IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
-                                                uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
-{
-    Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
-    Assert(cAddrSizeBits == 0 || cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
-    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
-
-    static const uint32_t s_auIoOpSize[]   = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
-    static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
-
-    SVMIOIOEXITINFO IoExitInfo;
-    IoExitInfo.u  = s_auIoOpSize[cbReg & 7];
-    IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
-    IoExitInfo.n.u1STR   = fStrIo;
-    IoExitInfo.n.u1REP   = fRep;
-    IoExitInfo.n.u3SEG   = iEffSeg & 0x7;
-    IoExitInfo.n.u1Type  = enmIoType;
-    IoExitInfo.n.u16Port = u16Port;
-
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
-}
-
-#else
-IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
-                                                uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
-{
-    RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
-    return VERR_IEM_IPE_9;
-}
-#endif /* VBOX_WITH_NESTED_HWVIRT */
-
+IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
+                                     uint64_t uExitInfo2);
+IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
+                                                   uint32_t uErr, uint64_t uCr2);
+#endif

 /**
@@ -1091 +1038 @@
 }

+#ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * Performs a minimal reinitialization of the execution state.
+ *
+ * This is intended to be used by VM-exits, SMM, LOADALL and other similar
+ * 'world-switch' types operations on the CPU. Currently only nested
+ * hardware-virtualization uses it.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
+ */
+IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
+{
+    PCPUMCTX const pCtx    = IEM_GET_CTX(pVCpu);
+    IEMMODE const  enmMode = iemCalcCpuMode(pCtx);
+    uint8_t const  uCpl    = CPUMGetGuestCPL(pVCpu);
+
+    pVCpu->iem.s.uCpl           = uCpl;
+    pVCpu->iem.s.enmCpuMode     = enmMode;
+    pVCpu->iem.s.enmDefAddrMode = enmMode;  /** @todo check if this is correct... */
+    pVCpu->iem.s.enmEffAddrMode = enmMode;
+    if (enmMode != IEMMODE_64BIT)
+    {
+        pVCpu->iem.s.enmDefOpSize = enmMode;  /** @todo check if this is correct... */
+        pVCpu->iem.s.enmEffOpSize = enmMode;
+    }
+    else
+    {
+        pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
+        pVCpu->iem.s.enmEffOpSize = enmMode;
+    }
+    pVCpu->iem.s.iEffSeg = X86_SREG_DS;
+#ifndef IEM_WITH_CODE_TLB
+    /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
+    pVCpu->iem.s.offOpcode = 0;
+    pVCpu->iem.s.cbOpcode  = 0;
+#endif
+}
+#endif

 /**
@@ -3366 +3351 @@
     {
         Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -3373 +3358 @@
 }

-
-#ifdef VBOX_WITH_NESTED_HWVIRT
-IEM_STATIC VBOXSTRICTRC iemHandleSvmNstGstEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
-                                                         uint32_t uErr, uint64_t uCr2)
-{
-    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
-
-    /*
-     * Handle nested-guest SVM exception and software interrupt intercepts,
-     * see AMD spec. 15.12 "Exception Intercepts".
-     *
-     *   - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
-     *   - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
-     *     even when they use a vector in the range 0 to 31.
-     *   - ICEBP should not trigger #DB intercept, but its own intercept.
-     *   - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
-     */
-    /* Check NMI intercept */
-    if (   u8Vector == X86_XCPT_NMI
-        && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
-        && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
-    {
-        Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    /* Check ICEBP intercept. */
-    if (   (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
-        && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
-    {
-        Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    /* Check CPU exception intercepts. */
-    if (   (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
-        && IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
-    {
-        Assert(u8Vector <= X86_XCPT_LAST);
-        uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
-        uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
-        if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
-            && u8Vector == X86_XCPT_PF
-            && !(uErr & X86_TRAP_PF_ID))
-        {
-            /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
-#ifdef IEM_WITH_CODE_TLB
-            AssertReleaseFailedReturn(VERR_IEM_IPE_5);
-#else
-            uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
-            uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
-            if (   cbCurrent > 0
-                && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
-            {
-                Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
-                memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
-            }
-#endif
-        }
-        Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept. u32InterceptXcpt=%#RX32 u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n",
-              pCtx->hwvirt.svm.VmcbCtrl.u32InterceptXcpt, u8Vector, uExitInfo1, uExitInfo2));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
-    }
-
-    /* Check software interrupt (INTn) intercepts. */
-    if (   (fFlags & (  IEM_XCPT_FLAGS_T_SOFT_INT
-                      | IEM_XCPT_FLAGS_BP_INSTR
-                      | IEM_XCPT_FLAGS_ICEBP_INSTR
-                      | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
-        && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
-    {
-        uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
-        Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
-    }
-
-    return VINF_HM_INTERCEPT_NOT_ACTIVE;
-}
-#endif

 /**
@@ -5482 +5388 @@
      * Check and handle if the event being raised is intercepted.
      */
-    VBOXSTRICTRC rcStrict0 = iemHandleSvmNstGstEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
+    VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
     if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
         return rcStrict0;
@@ -5525 +5431 @@
         /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
         if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
-            IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+            IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
     else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
@@ -8125 +8031 @@
     /** @todo Need a different PGM interface here. We're currently using
      *        generic / REM interfaces. this won't cut it for R0 & RC. */
+    /** @todo If/when PGM handles paged real-mode, we can remove the hack in
+     *        iemSvmHandleWorldSwitch to work around raising a page-fault here. */
     RTGCPHYS GCPhys;
     uint64_t fFlags;
@@ -10326 +10234 @@
     {
         Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -12863 +12771 @@
     { \
         if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
-            IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
+            IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
     } while (0)
@@ -12871 +12779 @@
     { \
        if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
-            IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
+            IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
     } while (0)
@@ -13926 +13834 @@
      */
     pVCpu->iem.s.uInjectCpl = UINT8_MAX;
-    if (   pOrgCtx->eflags.Bits.u1IF
+    /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
+#if defined(VBOX_WITH_NESTED_HWVIRT)
+    bool fIntrEnabled = pOrgCtx->hwvirt.svm.fGif;
+    if (fIntrEnabled)
+    {
+        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+            fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
+        else
+            fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
+    }
+#else
+    bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
+#endif
+    if (   fIntrEnabled
         && TRPMHasTrap(pVCpu)
         && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
@@ -14876 +14797 @@
                   || rcStrict == VINF_CSAM_PENDING_ACTION
                   || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
+                  /* nested hw.virt codes: */
+                  || rcStrict == VINF_SVM_VMEXIT
                   , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
         /** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
         int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
+#ifdef VBOX_WITH_NESTED_HWVIRT
+        if (rcStrict == VINF_SVM_VMEXIT)
+            rcStrict = VINF_SUCCESS;
+        else
+#endif
         if (rcPassUp == VINF_SUCCESS)
             pVCpu->iem.s.cRetInfStatuses++;
@@ -14911 +14839 @@
         rcStrict = pVCpu->iem.s.rcPassUp;
     }
-#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    else if (pVCpu->iem.s.fForceIemExec)
-        rcStrict = VINF_EM_RESCHEDULE_REM;
-#endif

     return rcStrict;
@@ -15268 +15192 @@
     pVCpu->iem.s.uInjectCpl = UINT8_MAX;
 # endif
-    if (   pCtx->eflags.Bits.u1IF
+
+    /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
+#if defined(VBOX_WITH_NESTED_HWVIRT)
+    bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
+    if (fIntrEnabled)
+    {
+        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+            fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
+        else
+            fIntrEnabled = pCtx->eflags.Bits.u1IF;
+    }
+#else
+    bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
+#endif
+    if (   fIntrEnabled
         && TRPMHasTrap(pVCpu)
         && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
@@ -15320 +15258 @@
     pVCpu->iem.s.uInjectCpl = UINT8_MAX;
 # endif
-    if (   pCtx->eflags.Bits.u1IF
+
+    /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
+#if defined(VBOX_WITH_NESTED_HWVIRT)
+    bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
+    if (fIntrEnabled)
+    {
+        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+            fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
+        else
+            fIntrEnabled = pCtx->eflags.Bits.u1IF;
+    }
+#else
+    bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
+#endif
+    if (   fIntrEnabled
         && TRPMHasTrap(pVCpu)
         && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
@@ -15414 +15366 @@
         iemMemRollback(pVCpu);
         pVCpu->iem.s.cLongJumps++;
+        /** @todo Why isn't iemExecStatusCodeFiddling called here always? */
+# ifdef VBOX_WITH_NESTED_HWVIRT
+        /*
+         * When a nested-guest causes an exception intercept when fetching memory
+         * (e.g. IEM_MC_FETCH_MEM_U16) as part of instruction execution, we need this
+         * to fix-up VINF_SVM_VMEXIT on the longjmp way out, otherwise we will guru.
+         */
+        rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
+# endif
     }
     pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
@@ -16048 +16009 @@
 }

-
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
@@ -16137 +16097 @@
     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
+
+
+/**
+ * Interface for HM and EM to emulate the VMRUN instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   cbInstr     The instruction length in bytes.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
+    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
+
+
+/**
+ * Interface for HM and EM to emulate \#VMEXIT.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   uExitCode   The exit code.
+ * @param   uExitInfo1  The exit info. 1 field.
+ * @param   uExitInfo2  The exit info. 2 field.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
+{
+    return iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
 }
 #endif /* VBOX_WITH_NESTED_HWVIRT */
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r67163 → r67528

@@ -15 +15 @@
  * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
  */
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+# include "IEMAllCImplSvmInstr.cpp.h"
+#endif

 /** @name Misc Helpers
@@ -561 +565 @@
     {
         Log2(("pushf: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -628 +632 @@
     {
         Log2(("popf: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -2630 +2634 @@
     RTUINT64U NewRsp;
     NewRsp.u = pCtx->rsp;
+
     switch (enmEffOpSize)
     {
@@ -3874 +3879 @@
     {
         Log(("iret: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -4657 +4662 @@
     {
         Log(("lgdt: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -4729 +4734 @@
     {
         Log(("lidt: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -4821 +4826 @@
     {
         Log(("lldt: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -4902 +4907 @@
     {
         Log(("lldt: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -4959 +4964 @@
     {
         Log(("ltr: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
@@ -5065 +5070 @@
     if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
     {
-        Log(("iemCImpl_mov_Rd_Cd%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-        IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
+        Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
+        IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
     }
@@ -5205 +5210 @@
        {
            Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-           IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
+           IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
        }
        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITES))
@@ -5215 +5220 @@
            Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
            Log(("iemCImpl_load_Cr%#x: TS/MP bit changed or lmsw instr: Guest intercept -> #VMEXIT\n", iCrReg));
-           IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+           IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }
    }
@@ -5282 +5287 @@
        {
            Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-           IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
+           IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
        }
        pCtx->cr2 = uNewCrX;
@@ -5323 +5328 @@
        {
            Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-           IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
+           IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
        }
@@ -5392 +5397 @@
        {
            Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-           IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
+           IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
        }
@@ -5455 +5460 @@
        {
            Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-           IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
+           IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
        }
@@ -5615 +5620 @@
        {
            Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
-           IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
-                                         IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
+           IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
+                                 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
        }
@@ -5716 +5721 @@
        {
            Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
-           IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
-                                         IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
+           IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
+                                 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
        }
@@ -5752 +5757 @@
        {
            Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
-           IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
-                                         IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? GCPtrPage : 0, 0 /* uExitInfo2 */);
+           IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
+                                 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? GCPtrPage : 0, 0 /* uExitInfo2 */);
        }
@@ -5793 +5798 @@
    {
        Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
-       IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }
@@ -5834 +5839 @@
    {
        Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
-       IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }
@@ -5872 +5877 @@
    {
        Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
-       IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }
@@ -5902 +5907 @@
    RTUINT64U uValue;
    VBOXSTRICTRC rcStrict;
+#ifdef VBOX_WITH_NESTED_HWVIRT
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
    {
-       rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, false /* fWrite */);
+       rcStrict = iemSvmHandleMsrIntercept(pVCpu, pCtx, pCtx->ecx, false /* fWrite */);
        if (rcStrict == VINF_SVM_VMEXIT)
            return VINF_SUCCESS;
@@ -5913 +5919 @@
        }
    }
+#endif

    rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
@@ -5967 +5974 @@

    VBOXSTRICTRC rcStrict;
+#ifdef VBOX_WITH_NESTED_HWVIRT
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
    {
-       rcStrict = IEM_SVM_NST_GST_MSR_INTERCEPT(pVCpu, pCtx->ecx, true /* fWrite */);
+       rcStrict = iemSvmHandleMsrIntercept(pVCpu, pCtx, pCtx->ecx, true /* fWrite */);
        if (rcStrict == VINF_SVM_VMEXIT)
            return VINF_SUCCESS;
@@ -5978 +5986 @@
        }
    }
+#endif

    if (!IEM_VERIFICATION_ENABLED(pVCpu))
@@ -6039 +6048 @@
    /*
     * Check SVM nested-guest IO intercept.
     */
+#ifdef VBOX_WITH_NESTED_HWVIRT
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
    {
…
@@ -6052 +6062 @@
        }
    }
+#endif

    /*
@@ -6126 +6137 @@
    /*
     * Check SVM nested-guest IO intercept.
     */
+#ifdef VBOX_WITH_NESTED_HWVIRT
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
    {
…
@@ -6139 +6151 @@
        }
    }
+#endif

    /*
@@ -6191 +6204 @@
 }

-
-#ifdef VBOX_WITH_NESTED_HWVIRT
-/**
- * Implements 'VMRUN'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_vmrun)
-{
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
-
-    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
-    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
-        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
-    {
-        Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
-
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
-    {
-        Log(("vmrun: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    VBOXSTRICTRC rcStrict = HMSvmVmrun(pVCpu, pCtx, cbInstr, GCPhysVmcb);
-    /* If VMRUN execution causes a #VMEXIT, we continue executing the instruction following the VMRUN. */
-    if (rcStrict == VINF_SVM_VMEXIT)
-    {
-        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-        rcStrict = VINF_SUCCESS;
-    }
-    else if (rcStrict == VERR_SVM_VMEXIT_FAILED)
-        rcStrict = iemInitiateCpuShutdown(pVCpu);
-    return rcStrict;
-}
-
-
-/**
- * Implements 'VMMCALL'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
-{
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
-    {
-        Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    bool fUpdatedRipAndRF;
-    VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fUpdatedRipAndRF);
-    if (RT_SUCCESS(rcStrict))
-    {
-        if (!fUpdatedRipAndRF)
-            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-        return rcStrict;
-    }
-
-    return iemRaiseUndefinedOpcode(pVCpu);
-}
-
-
-/**
- * Implements 'VMLOAD'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_vmload)
-{
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
-
-    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
-    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
-        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
-    {
-        Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
-
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
-    {
-        Log(("vmload: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    void *pvVmcb;
-    PGMPAGEMAPLOCK PgLockVmcb;
-    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, &pvVmcb, &PgLockVmcb);
-    if (rcStrict == VINF_SUCCESS)
-    {
-        PCSVMVMCB pVmcb = (PCSVMVMCB)pvVmcb;
-        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, FS, fs);
-        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, GS, gs);
-        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, TR, tr);
-        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
-
-        pCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase;
-        pCtx->msrSTAR         = pVmcb->guest.u64STAR;
-        pCtx->msrLSTAR        = pVmcb->guest.u64LSTAR;
-        pCtx->msrCSTAR        = pVmcb->guest.u64CSTAR;
-        pCtx->msrSFMASK       = pVmcb->guest.u64SFMASK;
-
-        pCtx->SysEnter.cs  = pVmcb->guest.u64SysEnterCS;
-        pCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP;
-        pCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP;
-
-        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, pvVmcb, &PgLockVmcb);
-        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-    }
-    return rcStrict;
-}
-
-
-/**
- * Implements 'VMSAVE'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_vmsave)
-{
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
-
-    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
-    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
-        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
-    {
-        Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
-
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
-    {
-        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    void *pvVmcb;
-    PGMPAGEMAPLOCK PgLockVmcb;
-    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
-    if (rcStrict == VINF_SUCCESS)
-    {
-        PSVMVMCB pVmcb = (PSVMVMCB)pvVmcb;
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs);
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs);
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr);
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
-
-        pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
-        pVmcb->guest.u64STAR         = pCtx->msrSTAR;
-        pVmcb->guest.u64LSTAR        = pCtx->msrLSTAR;
-        pVmcb->guest.u64CSTAR        = pCtx->msrCSTAR;
-        pVmcb->guest.u64SFMASK       = pCtx->msrSFMASK;
-
-        pVmcb->guest.u64SysEnterCS  = pCtx->SysEnter.cs;
-        pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
-        pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
-
-        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, pvVmcb, &PgLockVmcb);
-        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-    }
-    return rcStrict;
-}
-
-
-/**
- * Implements 'CLGI'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_clgi)
-{
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
-    {
-        Log(("clgi: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    pCtx->hwvirt.svm.fGif = 0;
-    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    pVCpu->iem.s.fForceIemExec = true;
-#endif
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Implements 'STGI'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_stgi)
-{
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
-    {
-        Log2(("stgi: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    pCtx->hwvirt.svm.fGif = 1;
-    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    pVCpu->iem.s.fForceIemExec = false;
-#endif
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Implements 'INVLPGA'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_invlpga)
-{
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    RTGCPTR const GCPtrPage = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
-    /** @todo PGM needs virtual ASID support. */
-#if 0
-    uint32_t const uAsid = pCtx->ecx;
-#endif
-
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
-    {
-        Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    PGMInvalidatePage(pVCpu, GCPtrPage);
-    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Implements 'SKINIT'.
- */
-IEM_CIMPL_DEF_0(iemCImpl_skinit)
-{
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
-
-    uint32_t uIgnore;
-    uint32_t fFeaturesECX;
-    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
-    if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
-        return iemRaiseUndefinedOpcode(pVCpu);
-
-    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
-    {
-        Log2(("skinit: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    RT_NOREF(cbInstr);
-    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
-}
-#endif /* VBOX_WITH_NESTED_HWVIRT */

 /**
@@ -6546 +6305 @@
    {
        Log2(("hlt: Guest intercept -> #VMEXIT\n"));
-       IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }
@@ -6600 +6359 @@
    {
        Log2(("monitor: Guest intercept -> #VMEXIT\n"));
-       IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }
@@ -6667 +6426 @@
    {
        Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
-       IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
    {
        Log2(("mwait: Guest intercept -> #VMEXIT\n"));
-       IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }
@@ -6724 +6483 @@
    {
        Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
-       IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }
@@ -7077 +6836 @@
    {
        Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
-       IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }
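With the SVM instruction bodies moved out to IEMAllCImplSvmInstr.cpp.h and #VMEXIT handling now owned by IEM (iemSvmVmexit), external callers go through the new IEMExecSvmVmexit wrapper added to IEMAll.cpp above. A hedged sketch of a call site; the surrounding function is invented for illustration and only the IEMExecSvmVmexit call itself comes from this changeset:

    /* Illustrative only: reflect an intercepted HLT to the nested-guest hypervisor. */
    static VBOXSTRICTRC hmSvmNstGstReflectHlt(PVMCPU pVCpu)
    {
        /* SVM_EXIT_HLT carries no exit info; IEM performs the world switch back
           into the nested-guest hypervisor and returns a strict status code. */
        return IEMExecSvmVmexit(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }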
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
r66581 → r67528

@@ -1218 +1218 @@
     }

+#ifdef VBOX_WITH_NESTED_HWVIRT
    /*
     * Check SVM nested-guest IO intercept.
…
@@ -1234 +1235 @@
        }
    }
+#endif

    OP_TYPE *puMem;
@@ -1288 +1290 @@
    }

+#ifdef VBOX_WITH_NESTED_HWVIRT
    /*
     * Check SVM nested-guest IO intercept.
…
@@ -1304 +1307 @@
        }
    }
+#endif

    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
@@ -1489 +1493 @@
    }

+#ifdef VBOX_WITH_NESTED_HWVIRT
    /*
     * Check SVM nested-guest IO intercept.
…
@@ -1505 +1510 @@
        }
    }
+#endif

    OP_TYPE uValue;
@@ -1549 +1555 @@
    }

+#ifdef VBOX_WITH_NESTED_HWVIRT
    /*
     * Check SVM nested-guest IO intercept.
…
@@ -1565 +1572 @@
        }
    }
+#endif

    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
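Once instantiated, the template code guarded above calls iemSvmHandleIOIntercept(), which builds the SVMIOIOEXITINFO from the operand and address sizes much as the removed IEMAll.cpp version did. Roughly how the guarded check reads for a concrete case, here a 16-bit INS with ES as the target segment; the exact arguments and the SVMIOIOTYPE_IN value are assumptions based on the function's signature before it moved:

    #ifdef VBOX_WITH_NESTED_HWVIRT
        /* Check the SVM nested-guest IO intercept before touching guest memory. */
        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
        {
            VBOXSTRICTRC rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_IN, 2 /* cbReg */,
                                                            16 /* cAddrSizeBits */, X86_SREG_ES, false /* fRep */,
                                                            true /* fStrIo */, cbInstr);
            if (rcStrict == VINF_SVM_VMEXIT)
                return VINF_SUCCESS;    /* #VMEXIT taken, nothing more to do here. */
            if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
                return rcStrict;
        }
    #endif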