Timestamp:
    Jun 24, 2023 2:48:28 AM (19 months ago)
Location:
    trunk/src/VBox/VMM
Files:
    5 edited
Legend: lines prefixed with '-' were removed, lines prefixed with '+' were added; other lines are unmodified context.
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (diff r100266 → r100277)

  *          calling thread.
  * @param   pvDst       Where to return the bytes.
- * @param   cbDst       Number of bytes to read.
- *
- * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
+ * @param   cbDst       Number of bytes to read.  A value of zero is
+ *                      allowed for initializing pbInstrBuf (the
+ *                      recompiler does this).  In this case it is best
+ *                      to set pbInstrBuf to NULL prior to the call.
  */
 void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP

…

             /* Do the reading. */
             uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
-            VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
-                                                pvDst, cbToRead, PGMACCESSORIGIN_IEM);
-            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-            { /* likely */ }
-            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
+            if (cbToRead > 0)
             {
-                Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
-                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
-                rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
-                AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
-            }
-            else
-            {
-                Log((RT_SUCCESS(rcStrict)
-                     ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
-                     : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
-                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
-                IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
-            }
-            pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
+                VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
+                                                    pvDst, cbToRead, PGMACCESSORIGIN_IEM);
+                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+                { /* likely */ }
+                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
+                {
+                    Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
+                         GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
+                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
+                    AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
+                }
+                else
+                {
+                    Log((RT_SUCCESS(rcStrict)
+                         ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
+                         : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
+                         GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
+                    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
+                }
+            }
+
+            /* Update the state and probably return. */
+            uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
+            pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
+            pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
+            pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
+            pVCpu->iem.s.cbInstrBufTotal  = X86_PAGE_SIZE;
+            pVCpu->iem.s.GCPhysInstrBuf   = pTlbe->GCPhys;
+            pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
+            pVCpu->iem.s.pbInstrBuf       = NULL;
             if (cbToRead == cbDst)
                 return;
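For context, the zero-length fetch allowed above is exactly how the recompiler now primes the instruction buffer (see the IEMAllThreadedRecompiler.cpp hunks below). A minimal sketch of the documented contract, assuming the VBox types are in scope; the helper name is made up for illustration:

    /* Hypothetical helper (illustration only, not part of this changeset):
       prime the instruction buffer for the current PC without copying any
       opcode bytes, per the new cbDst == 0 contract. */
    static RTGCPHYS examplePrimeInstrBuf(PVMCPUCC pVCpu)
    {
        pVCpu->iem.s.pbInstrBuf       = NULL;   /* recommended by the new docs */
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.offInstrNextByte = 0;
        iemOpcodeFetchBytesJmp(pVCpu, 0 /*cbDst*/, NULL /*pvDst*/);
        /* pbInstrBuf may still be NULL here (MMIO or unassigned memory),
           but GCPhysInstrBuf/offCurInstrStart now describe the current PC. */
        return pVCpu->iem.s.GCPhysInstrBuf + pVCpu->iem.s.offCurInstrStart;
    }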
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedFunctions.cpp (diff r100231 → r100277)

 }
 
+
+/**
+ * Built-in function that checks the EIP/IP + uParam0 is within CS.LIM,
+ * raising a \#GP(0) if this isn't the case.
+ */
+static IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLim,
+                         (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
+{
+    uint32_t const cbInstr = (uint32_t)uParam0;
+    if (pVCpu->cpum.GstCtx.eip - pVCpu->cpum.GstCtx.cs.u32Limit >= cbInstr)
+        return VINF_SUCCESS;
+    Log(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n",
+         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, cbInstr, pVCpu->cpum.GstCtx.cs.u32Limit));
+    RT_NOREF(uParam1, uParam2);
+    return iemRaiseGeneralProtectionFault0(pVCpu);
+}
+
 /*
  * The threaded functions.
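The guard leans on unsigned 32-bit arithmetic so that a single comparison can stand in for a pair of bounds tests. A standalone illustration of that single-compare range idiom (plain C, not VBox code):

    #include <stdint.h>
    #include <stdio.h>

    /* One unsigned compare tests lo <= x < lo + len: if x is below lo the
       subtraction wraps to a huge value and the comparison fails. */
    static int inRange(uint32_t x, uint32_t lo, uint32_t len)
    {
        return x - lo < len;
    }

    int main(void)
    {
        printf("%d\n", inRange(0x1005, 0x1000, 16)); /* 1: inside          */
        printf("%d\n", inRange(0x0fff, 0x1000, 16)); /* 0: below lo, wraps */
        printf("%d\n", inRange(0x1010, 0x1000, 16)); /* 0: past the end    */
        return 0;
    }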
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedPython.py (diff r100266 → r100277)

         aoStmts = [
-            iai.McCppGeneric('IEM_MC2_PRE_EMIT_CALLS();', cchIndent = cchIndent),   # Serves as a hook for various stuff.
+            iai.McCppGeneric('IEM_MC2_BEGIN_EMIT_CALLS();', cchIndent = cchIndent), # Scope and a hook for various stuff.
             iai.McCppGeneric(sCode, cchIndent = cchIndent),
         ];

…

                                          cchIndent = cchIndent));
 
+        aoStmts.append(iai.McCppGeneric('IEM_MC2_END_EMIT_CALLS();', cchIndent = cchIndent)); # For closing the scope.
         return aoStmts;

…

             '     */'
             '    kIemThreadedFunc_CheckMode,',
+            '    kIemThreadedFunc_CheckCsLim,',
         ];
         iThreadedFunction = 1;

…

                      + '     */'
                      + '    iemThreadedFunc_BltIn_CheckMode,\n'
+                     + '    iemThreadedFunc_BltIn_CheckCsLim,\n'
                      );
         iThreadedFunction = 1;

…

                      + '     */'
                      + '    "BltIn_CheckMode",\n'
+                     + '    "BltIn_CheckCsLim",\n'
                      );
         iThreadedFunction = 1;
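With these generator changes, each decoded instruction's emitted calls are bracketed by the new scope macros. Roughly what the generated C now looks like for one instruction; the threaded function name and its arguments are invented for illustration:

    IEM_MC2_BEGIN_EMIT_CALLS();                   /* opens the scope, may emit a CheckCsLim call */
    IEM_MC2_EMIT_CALL_2(kIemThreadedFunc_MovGregImm, idxGReg, uImm32); /* hypothetical */
    IEM_MC2_END_EMIT_CALLS();                     /* closes the scope */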
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedRecompiler.cpp (diff r100266 → r100277)

     uint16_t    uUnused0;
 
+    /** Offset into IEMTB::pabOpcodes. */
+    uint16_t    offOpcode;
     /** The opcode length. */
     uint8_t     cbOpcode;
-    /** The opcode chunk number.
-     * @note sketches for discontiguous opcode support */
-    uint8_t     idxOpcodeChunk;
-    /** The offset into the opcode chunk of this function.
-     * @note sketches for discontiguous opcode support */
-    uint16_t    offOpcodeChunk;
+    uint8_t     uUnused1;
 
     /** Generic parameters. */

…

     /** Pointer to the opcode bytes this block was recompiled from. */
     uint8_t     *pabOpcodes;
+
+#if 0
+    struct
+    {
+        uint16_t offOpcodes;
+        uint16_t cbOpcodes;
+    } aRanges;
+
+    /** Physical pages that the . */
+    RTGCPHYS aGCPhysPgs[2];
+#endif
+
 } IEMTB;

…

-#define IEM_MC2_PRE_EMIT_CALLS() do { \
+#define IEM_MC2_BEGIN_EMIT_CALLS() \
+    { \
+        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
         AssertMsg(pVCpu->iem.s.offOpcode == IEM_GET_INSTR_LEN(pVCpu), \
                   ("%u vs %u (%04x:%08RX64)\n", pVCpu->iem.s.offOpcode, IEM_GET_INSTR_LEN(pVCpu), \
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); \
-    } while (0)
+        /** @todo check for cross page stuff */ \
+        if (!(pTb->fFlags & IEMTB_F_CS_LIM_CHECKS)) \
+        { /* likely */ } \
+        else \
+        { \
+            PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
+            pCall->enmFunction = kIemThreadedFunc_CheckCsLim; \
+            pCall->offOpcode   = pTb->cbOpcodes; \
+            pCall->auParams[0] = pCall->cbOpcode = IEM_GET_INSTR_LEN(pVCpu); \
+            pCall->auParams[1] = 0; \
+            pCall->auParams[2] = 0; \
+        } \
+        do { } while (0)
 
 #define IEM_MC2_EMIT_CALL_0(a_enmFunction) do { \
         IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
         \
-        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
         PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
         pCall->enmFunction = a_enmFunction; \
+        pCall->offOpcode   = pTb->cbOpcodes; \
         pCall->cbOpcode    = IEM_GET_INSTR_LEN(pVCpu); \
         pCall->auParams[0] = 0; \

…

         uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
         \
-        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
         PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
         pCall->enmFunction = a_enmFunction; \
+        pCall->offOpcode   = pTb->cbOpcodes; \
         pCall->cbOpcode    = IEM_GET_INSTR_LEN(pVCpu); \
         pCall->auParams[0] = a_uArg0; \

…

         uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
         \
-        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
         PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
         pCall->enmFunction = a_enmFunction; \
+        pCall->offOpcode   = pTb->cbOpcodes; \
         pCall->cbOpcode    = IEM_GET_INSTR_LEN(pVCpu); \
         pCall->auParams[0] = a_uArg0; \

…

         uint64_t const uArg2Check = (a_uArg2); RT_NOREF(uArg2Check); \
         \
-        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
         PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
         pCall->enmFunction = a_enmFunction; \
+        pCall->offOpcode   = pTb->cbOpcodes; \
         pCall->cbOpcode    = IEM_GET_INSTR_LEN(pVCpu); \
         pCall->auParams[0] = a_uArg0; \
         pCall->auParams[1] = a_uArg1; \
         pCall->auParams[2] = a_uArg2; \
+    } while (0)
+
+#define IEM_MC2_END_EMIT_CALLS() \
     } while (0)

…

 DECL_FORCE_INLINE(void) iemThreadedCompileInitOpcodeFetching(PVMCPUCC pVCpu)
 {
-    // // // // // // // // // // figure this out // // // //
-    pVCpu->iem.s.pbInstrBuf       = NULL;
-    pVCpu->iem.s.offInstrNextByte = 0;
-    pVCpu->iem.s.offCurInstrStart = 0;
+    /* Almost everything is done by iemGetPcWithPhysAndCode() already.  We just need to initialize the index into abOpcode. */
 #ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
     pVCpu->iem.s.offOpcode        = 0;
-#endif
-#ifdef VBOX_STRICT
-    pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
-    pVCpu->iem.s.cbInstrBuf       = UINT16_MAX;
-    pVCpu->iem.s.cbInstrBufTotal  = UINT16_MAX;
-    pVCpu->iem.s.uInstrBufPc      = UINT64_C(0xc0ffc0ffcff0c0ff);
-#endif
-    // // // // // // // // // // // // // // // // // // //
+#else
+    RT_NOREF(pVCpu);
+#endif
 }

…

     /*
      * Allocate a new translation block.
      */
-    if (!(fExtraFlags & IEMTB_F_RIP_CHECKS))
-    { /* likely */ }
-    else if (   !IEM_IS_64BIT_CODE(pVCpu)
-             ?  pVCpu->cpum.GstCtx.eip <= pVCpu->cpum.GstCtx.cs.u32Limit
-             :  IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.rip))
-    { /* likely */ }
-    else
-        return IEMExecOne(pVCpu);
-    fExtraFlags |= IEMTB_F_STATE_COMPILING;
-
-    PIEMTB pTb = iemThreadedTbAlloc(pVM, pVCpu, GCPhysPc, fExtraFlags);
+    PIEMTB pTb = iemThreadedTbAlloc(pVM, pVCpu, GCPhysPc, fExtraFlags | IEMTB_F_STATE_COMPILING);
     AssertReturn(pTb, VERR_IEM_TB_ALLOC_FAILED);

…

 /**
  * This is called when the PC doesn't match the current pbInstrBuf.
- */
-static RTGCPHYS iemGetPcWithPhysAndCodeMissed(PVMCPUCC pVCpu, uint64_t const uPc)
-{
-    /** @todo see iemOpcodeFetchBytesJmp */
-    pVCpu->iem.s.pbInstrBuf = NULL;
-
+ *
+ * Upon return, we're ready for opcode fetching.  But please note that
+ * pbInstrBuf can be NULL iff the memory doesn't have readable backing (i.e.
+ * MMIO or unassigned).
+ */
+static RTGCPHYS iemGetPcWithPhysAndCodeMissed(PVMCPUCC pVCpu)
+{
+    pVCpu->iem.s.pbInstrBuf       = NULL;
+    pVCpu->iem.s.offCurInstrStart = 0;
     pVCpu->iem.s.offInstrNextByte = 0;
-    pVCpu->iem.s.offCurInstrStart = 0;
-    pVCpu->iem.s.cbInstrBuf       = 0;
-    pVCpu->iem.s.cbInstrBufTotal  = 0;
-
-    uint8_t bIgn;
-    iemOpcodeFetchBytesJmp(pVCpu, 1, &bIgn);
-
-    uint64_t off = uPc - pVCpu->iem.s.uInstrBufPc;
-    if (off < pVCpu->iem.s.cbInstrBufTotal)
-    {
-        pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
-        pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
-        if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
-            pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
-        else
-            pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
-
-        return pVCpu->iem.s.GCPhysInstrBuf + off;
-    }
-
-    AssertFailed();
-    RT_NOREF(uPc);
-    return NIL_RTGCPHYS;
+    iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);
+    return pVCpu->iem.s.GCPhysInstrBuf + pVCpu->iem.s.offCurInstrStart;
 }

…

 DECL_FORCE_INLINE_THROW(RTGCPHYS) iemGetPcWithPhysAndCode(PVMCPUCC pVCpu)
 {
-    /* Set uCurTbStartPc to RIP and calc the effective PC. */
+    /*
+     * Set uCurTbStartPc to RIP and calc the effective PC.
+     */
     uint64_t uPc = pVCpu->cpum.GstCtx.rip;
     pVCpu->iem.s.uCurTbStartPc = uPc;

…

         uPc += pVCpu->cpum.GstCtx.cs.u64Base;
 
+    /*
+     * Advance within the current buffer (PAGE) when possible.
+     */
     if (pVCpu->iem.s.pbInstrBuf)
     {

…

     }
-    return iemGetPcWithPhysAndCodeMissed(pVCpu, uPc);
+    return iemGetPcWithPhysAndCodeMissed(pVCpu);
 }

…

     if (IEM_IS_64BIT_CODE(pVCpu))
-    {
-        if (RT_LIKELY(   IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.rip)
-                      && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.rip + 256)))
-            return IEMTB_F_TYPE_THREADED;
-    }
-    else
-    {
-        if (RT_LIKELY(   pVCpu->cpum.GstCtx.eip < pVCpu->cpum.GstCtx.cs.u32Limit
-                      && (uint64_t)256 + pVCpu->cpum.GstCtx.eip < pVCpu->cpum.GstCtx.cs.u32Limit))
-            return IEMTB_F_TYPE_THREADED;
-    }
-    return IEMTB_F_RIP_CHECKS | IEMTB_F_TYPE_THREADED;
+        return IEMTB_F_TYPE_THREADED;
+
+    if (RT_LIKELY(   pVCpu->cpum.GstCtx.eip < pVCpu->cpum.GstCtx.cs.u32Limit
+                  && pVCpu->cpum.GstCtx.eip - pVCpu->cpum.GstCtx.cs.u32Limit >= X86_PAGE_SIZE))
+        return IEMTB_F_TYPE_THREADED;
+
+    return IEMTB_F_TYPE_THREADED | IEMTB_F_CS_LIM_CHECKS;

…

     PIEMTB pTb = NULL;
     VBOXSTRICTRC rcStrict;
-#ifdef IEM_WITH_SETJMP
     IEM_TRY_SETJMP(pVCpu, rcStrict)
-#endif
     {
         for (;;)

…

         }
     }
-#ifdef IEM_WITH_SETJMP
     IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
     {

…

         if (pVCpu->iem.s.cActiveMappings > 0)
             iemMemRollback(pVCpu);
-        if (pTb)
-            return rcStrict;
-        return iemThreadedCompileLongJumped(pVM, pVCpu, rcStrict);
+
+        /* If pTb isn't NULL we're in iemThreadedTbExec. */
+        if (!pTb)
+        {
+            /* If pCurTbR3 is NULL, we're in iemGetPcWithPhysAndCode. */
+            pTb = pVCpu->iem.s.pCurTbR3;
+            if (pTb)
+            {
+                /* If the pCurTbR3 block is in compiling state, we're in iemThreadedCompile,
+                   otherwise it's iemThreadedTbExec inside iemThreadedCompile (compile option). */
+                if ((pTb->fFlags & IEMTB_F_STATE_MASK) == IEMTB_F_STATE_COMPILING)
+                    return iemThreadedCompileLongJumped(pVM, pVCpu, rcStrict);
+            }
+        }
+        return rcStrict;
     }
     IEM_CATCH_LONGJMP_END(pVCpu);
-#endif
 }
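The BEGIN/END pairing above uses a slightly unusual macro idiom: IEM_MC2_BEGIN_EMIT_CALLS() opens a brace that stays open across the emit macros (which is why they no longer declare pTb themselves) and swallows its own semicolon with an empty do/while, while IEM_MC2_END_EMIT_CALLS() supplies the closing brace and lets the trailing "while (0)" plus the caller's semicolon parse as an empty loop. A standalone sketch of the idiom, under that reading of the macros (not VBox code):

    /* DEMO_BEGIN(); opens a scope and keeps it open; DEMO_END(); closes it.
       The empty do/while and the dangling while (0) exist only to absorb
       the semicolons the caller writes after each macro. */
    #define DEMO_BEGIN() \
        { \
            int iShared = 42; \
            do { } while (0)
    #define DEMO_END() \
        } while (0)

    static int demo(void)
    {
        DEMO_BEGIN();
        iShared += 1;   /* iShared is in scope between the two macros */
        DEMO_END();
        return 0;
    }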
trunk/src/VBox/VMM/include/IEMInternal.h (diff r100266 → r100277)

 #define IEMTB_F_STATE_OBSOLETE          UINT32_C(0x0c000000)
 
-/** Checks that EIP/IP is wihin CS.LIM and that RIP is canonical before each
- * instruction.  Used when we're close the limit before starting a TB, as
- * determined by iemGetTbFlagsForCurrentPc(). */
-#define IEMTB_F_RIP_CHECKS              UINT32_C(0x0c000000)
+/** Checks that EIP/IP is wihin CS.LIM before each instruction.  Used when
+ * we're close the limit before starting a TB, as determined by
+ * iemGetTbFlagsForCurrentPc(). */
+#define IEMTB_F_CS_LIM_CHECKS           UINT32_C(0x0c000000)
 
 /** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
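Taken together, the renamed flag now travels from TB creation to the per-instruction check roughly as follows. This condensed sketch is assembled from the hunks above and is not a verbatim excerpt (pVM, pVCpu and GCPhysPc are assumed to be in scope):

    /* 1. When a TB is started near the segment limit, the flag is picked up: */
    uint32_t const fExtraFlags = iemGetTbFlagsForCurrentPc(pVCpu); /* may include IEMTB_F_CS_LIM_CHECKS */
    PIEMTB         pTb         = iemThreadedTbAlloc(pVM, pVCpu, GCPhysPc,
                                                    fExtraFlags | IEMTB_F_STATE_COMPILING);

    /* 2. While compiling, IEM_MC2_BEGIN_EMIT_CALLS() sees the flag on
          pTb->fFlags and prepends a kIemThreadedFunc_CheckCsLim call entry
          for the instruction being recompiled. */

    /* 3. At execution time the threaded dispatcher runs the built-in, which
          either returns VINF_SUCCESS or raises #GP(0) via
          iemRaiseGeneralProtectionFault0(pVCpu). */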