Changeset 68362 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp: Aug 10, 2017 9:39:22 AM
File: 1 edited
Legend: unmodified lines carry a leading space, removed lines a leading '-', added lines a leading '+'.
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
r68227 → r68362:

@@ -96 +96 @@ iemSvmVmexit
 IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
 {
-#ifndef IN_RING3
-    RT_NOREF(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
-    AssertMsgFailed(("iemSvmVmexit: Bad context\n"));
-    return VERR_INTERNAL_ERROR_5;
-#else
     if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
         || uExitCode == SVM_EXIT_INVALID)

@@ -261 +256 @@ iemSvmVmexit
     AssertMsgFailed(("iemSvmVmexit: Unexpected SVM-exit failure uExitCode=%#RX64\n", uExitCode));
     return VERR_SVM_IPE_5;
-#endif
 }

@@ -282 +276 @@ iemSvmVmrun
 IEM_STATIC VBOXSTRICTRC iemSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr, RTGCPHYS GCPhysVmcb)
 {
-#ifndef IN_RING3
-    RT_NOREF(pVCpu, pCtx, cbInstr, GCPhysVmcb);
-    return VINF_EM_RESCHEDULE_REM;
-#else
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     LogFlow(("iemSvmVmrun\n"));

@@ -637 +627 @@ iemSvmVmrun
     Log(("iemSvmVmrun: Failed to read nested-guest VMCB at %#RGp (rc=%Rrc) -> #VMEXIT\n", GCPhysVmcb, rc));
     return rc;
-#endif
 }

@@ -934 +923 @@ iemSvmHandleIOIntercept
     Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u)\n", u16Port, u16Port));

-#if 1
     SVMIOIOEXITINFO IoExitInfo;
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);

@@ -945 +933 @@ iemSvmHandleIOIntercept
         return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_IOIO, IoExitInfo.u, pCtx->rip + cbInstr);
     }
-#else
-    /*
-     * The IOPM layout:
-     * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
-     * two 4K pages.
-     *
-     * For IO instructions that access more than a single byte, the permission bits
-     * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
-     *
-     * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
-     * we need 3 extra bits beyond the second 4K page.
-     */
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };
-
-    uint16_t const offIopm   = u16Port >> 3;
-    uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
-    uint8_t  const cShift    = u16Port - (offIopm << 3);
-    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
-
-    uint8_t const *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
-    Assert(pbIopm);
-    pbIopm += offIopm;
-    uint16_t const u16Iopm = *(uint16_t *)pbIopm;
-    if (u16Iopm & fIopmMask)
-    {
-        static const uint32_t s_auIoOpSize[] =
-        { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
-
-        static const uint32_t s_auIoAddrSize[] =
-        { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
-
-        SVMIOIOEXITINFO IoExitInfo;
-        IoExitInfo.u  = s_auIoOpSize[cbReg & 7];
-        IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
-        IoExitInfo.n.u1STR   = fStrIo;
-        IoExitInfo.n.u1REP   = fRep;
-        IoExitInfo.n.u3SEG   = iEffSeg & 7;
-        IoExitInfo.n.u1Type  = enmIoType;
-        IoExitInfo.n.u16Port = u16Port;
-
-        Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u) offIoPm=%u fSizeMask=%#x cShift=%u fIopmMask=%#x -> #VMEXIT\n",
-              u16Port, u16Port, offIopm, fSizeMask, cShift, fIopmMask));
-        return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_IOIO, IoExitInfo.u, pCtx->rip + cbInstr);
-    }
-#endif

     /** @todo remove later (for debugging as VirtualBox always traps all IO
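The comment in the branch removed above describes the IOPM (I/O permission map) layout: one permission bit per 8-bit port, i.e. 64K bits spread over two 4K pages, plus a few spare bits so a multi-byte access at the very last ports can still be tested. Below is a minimal standalone sketch of that lookup, assuming a caller that already knows the access width in bytes; the function name and the simplified size handling are illustrative only and are not part of the VirtualBox sources.

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the IOPM test described above: one bit per port, and any set
       bit among the bytes covered by the access means the access is intercepted. */
    static bool IsIoPortIntercepted(const uint8_t *pbIopm, uint16_t u16Port, uint8_t cbAccess)
    {
        /* 1 bit for a byte access, 2 adjacent bits for a word, 4 for a dword. */
        uint16_t const fSizeMask = (uint16_t)((1u << cbAccess) - 1u);
        uint16_t const offIopm   = u16Port >> 3;             /* byte offset into the bitmap */
        uint8_t  const cShift    = (uint8_t)(u16Port & 7);   /* bit offset within that byte  */
        uint16_t const fIopmMask = (uint16_t)(fSizeMask << cShift);

        /* Read two bytes so an access straddling a byte boundary is covered; this is
           why the bitmap carries a few extra bits beyond the second 4K page. */
        uint16_t const u16Iopm = (uint16_t)(pbIopm[offIopm] | ((uint16_t)pbIopm[offIopm + 1] << 8));
        return (u16Iopm & fIopmMask) != 0;                    /* any bit set -> #VMEXIT(SVM_EXIT_IOIO) */
    }

In the retained branch above, the exit itself is reported by filling an SVMIOIOEXITINFO record (operand/address size, segment, direction, port) and calling iemSvmVmexit with SVM_EXIT_IOIO.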
@@ -1066 +1008 @@ iemCImpl_vmrun
 IEM_CIMPL_DEF_0(iemCImpl_vmrun)
 {
-#ifndef IN_RING3
+#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
     RT_NOREF2(pVCpu, cbInstr);
-    return VINF_EM_RESCHEDULE_REM;
+    return VINF_EM_RAW_EMULATE_INSTR;
 #else
     LogFlow(("iemCImpl_vmrun\n"));

@@ -1129 +1071 @@ iemCImpl_vmload
 IEM_CIMPL_DEF_0(iemCImpl_vmload)
 {
-#ifndef IN_RING3
+#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
     RT_NOREF2(pVCpu, cbInstr);
     return VINF_EM_RAW_EMULATE_INSTR;

@@ -1184 +1126 @@ iemCImpl_vmsave
 IEM_CIMPL_DEF_0(iemCImpl_vmsave)
 {
-#ifndef IN_RING3
+#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
     RT_NOREF2(pVCpu, cbInstr);
     return VINF_EM_RAW_EMULATE_INSTR;

@@ -1242 +1184 @@ iemCImpl_clgi
 IEM_CIMPL_DEF_0(iemCImpl_clgi)
 {
-#ifndef IN_RING3
+#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
     RT_NOREF2(pVCpu, cbInstr);
-    return VINF_EM_RESCHEDULE_REM;
+    return VINF_EM_RAW_EMULATE_INSTR;
 #else
     LogFlow(("iemCImpl_clgi\n"));

@@ -1257 +1199 @@ iemCImpl_clgi
     pCtx->hwvirt.svm.fGif = 0;
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-# if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
     return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
 # else

@@ -1271 +1214 @@ iemCImpl_stgi
 IEM_CIMPL_DEF_0(iemCImpl_stgi)
 {
-#ifndef IN_RING3
+#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
     RT_NOREF2(pVCpu, cbInstr);
-    return VINF_EM_RESCHEDULE_REM;
+    return VINF_EM_RAW_EMULATE_INSTR;
 #else
     LogFlow(("iemCImpl_stgi\n"));

@@ -1286 +1229 @@ iemCImpl_stgi
     pCtx->hwvirt.svm.fGif = 1;
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
-# if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
     return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
 # else
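After the hunks above, the five SVM instruction handlers in this file (VMRUN, VMLOAD, VMSAVE, CLGI, STGI) share the same shape, condensed below with STGI as the example. This is a schematic paraphrase of the diff, not the verbatim source: outside ring-3, builds that emulate nested hardware virtualization only in IEM defer the instruction with VINF_EM_RAW_EMULATE_INSTR so ring-3 IEM re-executes it; in ring-3, CLGI additionally switches the VM to IEM-only execution and STGI switches it back via EMR3SetExecutionPolicy. The elided intercept checks and the final VINF_SUCCESS are assumptions for readability.

    /* Schematic shape of the handlers after this change (STGI shown). */
    IEM_CIMPL_DEF_0(iemCImpl_stgi)
    {
    #if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
        /* Ring-0/raw-mode: let ring-3 IEM emulate the instruction instead. */
        RT_NOREF2(pVCpu, cbInstr);
        return VINF_EM_RAW_EMULATE_INSTR;
    #else
        PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
        /* ... intercept checks elided ... */
        pCtx->hwvirt.svm.fGif = 1;                /* STGI sets the global interrupt flag. */
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);

    # if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
        /* GIF is set again: drop the force-IEM policy that CLGI enabled with 'true'. */
        return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
    # else
        return VINF_SUCCESS;                      /* assumed fall-through return */
    # endif
    #endif
    }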