Changeset 100731 in vbox for trunk/src/VBox/VMM/include
- Timestamp:
- Jul 28, 2023 10:22:22 PM (18 months ago)
- Location:
- trunk/src/VBox/VMM/include
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/include/IEMInternal.h
r100695 r100731 81 81 * Linux, but it should be quite a bit faster for normal code. 82 82 */ 83 #if (defined( IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \83 #if (defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \ 84 84 || defined(DOXYGEN_RUNNING) 85 85 # define IEM_WITH_THROW_CATCH … … 843 843 typedef IEMTB const *PCIEMTB; 844 844 845 /** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched). 846 * 847 * These flags parallel IEM_CIMPL_F_BRANCH_XXX. 848 * 849 * @{ */ 850 /** Value if no branching happened recently. */ 851 #define IEMBRANCHED_F_NO UINT8_C(0x00) 852 /** Flag set if direct branch, clear if absolute or indirect. */ 853 #define IEMBRANCHED_F_DIRECT UINT8_C(0x01) 854 /** Flag set if indirect branch, clear if direct or relative. */ 855 #define IEMBRANCHED_F_INDIRECT UINT8_C(0x02) 856 /** Flag set if relative branch, clear if absolute or indirect. */ 857 #define IEMBRANCHED_F_RELATIVE UINT8_C(0x04) 858 /** Flag set if conditional branch, clear if unconditional. */ 859 #define IEMBRANCHED_F_CONDITIONAL UINT8_C(0x08) 860 /** Flag set if it's a far branch. */ 861 #define IEMBRANCHED_F_FAR UINT8_C(0x10) 862 /** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero bytes relative jump. */ 863 #define IEMBRANCHED_F_ZERO UINT8_C(0x20) 864 /** @} */ 865 845 866 846 867 /** … … 1139 1160 * This is set by a previous instruction if it modified memory or similar. */ 1140 1161 bool fTbCheckOpcodes; 1141 /** Whether we just branched and need to start a new opcode range and emit code 1142 * to do a TLB load and check them again. */ 1143 bool fTbBranched; 1162 /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */ 1163 uint8_t fTbBranched; 1144 1164 /** Set when GCPhysInstrBuf is updated because of a page crossing. */ 1145 1165 bool fTbCrossedPage; … … 1148 1168 /** Space reserved for recompiler data / alignment. 
*/ 1149 1169 bool afRecompilerStuff1[4]; 1170 /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set. */ 1171 RTGCPHYS GCPhysInstrBufPrev; 1172 /** Copy of IEMCPU::GCPhysInstrBuf after decoding a branch instruction. 1173 * This is used together with fTbBranched and GCVirtTbBranchSrcBuf to determine 1174 * whether a branch instruction jumps to a new page or stays within the 1175 * current one. */ 1176 RTGCPHYS GCPhysTbBranchSrcBuf; 1177 /** Copy of IEMCPU::uInstrBufPc after decoding a branch instruction. */ 1178 uint64_t GCVirtTbBranchSrcBuf; 1179 /* Alignment. */ 1180 uint64_t au64RecompilerStuff2[5]; 1150 1181 /** Threaded TB statistics: Number of instructions per TB. */ 1151 1182 STAMPROFILE StatTbThreadedInstr; … … 4386 4417 4387 4418 /** 4388 * Macro for calling iemCImplRaiseInvalidOpcode() .4389 * 4390 * This enables us to add/remove arguments and force different levels of4391 * inlining as we wish.4419 * Macro for calling iemCImplRaiseInvalidOpcode() for decode/static \#UDs. 4420 * 4421 * This is for things that will _always_ decode to an \#UD, taking the 4422 * recompiler into consideration and everything. 4392 4423 * 4393 4424 * @return Strict VBox status code. 4394 4425 */ 4395 4426 #define IEMOP_RAISE_INVALID_OPCODE_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode) 4427 4428 /** 4429 * Macro for calling iemCImplRaiseInvalidOpcode() for runtime-style \#UDs. 4430 * 4431 * Using this macro means you've got _buggy_ _code_ and are doing things that 4432 * belong exclusively in IEMAllCImpl.cpp during decoding. 4433 * 4434 * @return Strict VBox status code. 
4435 * @see IEMOP_RAISE_INVALID_OPCODE_RET 4436 */ 4437 #define IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode) 4438 4396 4439 /** @} */ 4397 4440 … … 4899 4942 4900 4943 void iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb); 4944 4901 4945 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckMode, 4902 4946 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4903 4947 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLim, 4904 4948 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4949 4905 4950 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodes, 4906 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));4907 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb,4908 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));4909 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,4910 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));4911 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb,4912 4951 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4913 4952 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodes, 4914 4953 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4915 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb, 4954 4955 /* Branching: */ 4956 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes, 4957 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4958 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckPcAndOpcodes, 4959 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4960 4961 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, 
iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb, 4916 4962 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4917 4963 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb, 4918 4964 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4965 4966 /* Natural page crossing: */ 4967 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb, 4968 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4969 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb, 4970 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4971 4972 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb, 4973 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4919 4974 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb, 4920 4975 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4921 4976 4977 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb, 4978 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4979 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb, 4980 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4981 4982 4922 4983 4923 4984 extern const PFNIEMOP g_apfnIemInterpretOnlyOneByteMap[256]; -
trunk/src/VBox/VMM/include/IEMMc.h
r100701 r100731 1240 1240 * 1241 1241 * @{ */ 1242 /** Flag set if direct branch, clear if absolute or indirect. */ 1243 #define IEM_CIMPL_F_BRANCH_DIRECT RT_BIT_32(0) 1244 /** Flag set if indirect branch, clear if direct or relative. 1245 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++) 1246 * as well as for return instructions (RET, IRET, RETF). */ 1247 #define IEM_CIMPL_F_BRANCH_INDIRECT RT_BIT_32(1) 1248 /** Flag set if relative branch, clear if absolute or indirect. */ 1249 #define IEM_CIMPL_F_BRANCH_RELATIVE RT_BIT_32(2) 1250 /** Flag set if conditional branch, clear if unconditional. */ 1251 #define IEM_CIMPL_F_BRANCH_CONDITIONAL RT_BIT_32(3) 1252 /** Flag set if it's a far branch (changes CS). */ 1253 #define IEM_CIMPL_F_BRANCH_FAR RT_BIT_32(4) 1254 /** Convenience: Testing any kind of branch. */ 1255 #define IEM_CIMPL_F_BRANCH_ANY (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE) 1256 1242 1257 /** Execution flags may change (IEMCPU::fExec). */ 1243 #define IEM_CIMPL_F_MODE RT_BIT_32(0) 1244 /** Unconditional direct branches (changes RIP, maybe CS). */ 1245 #define IEM_CIMPL_F_BRANCH_UNCOND RT_BIT_32(1) 1246 /** Conditional direct branch (may change RIP, maybe CS). */ 1247 #define IEM_CIMPL_F_BRANCH_COND RT_BIT_32(2) 1248 /** Indirect unconditional branch (changes RIP, maybe CS). 1249 * 1250 * This is used for all system control transfers (SYSCALL, SYSRET, INT, ++) as 1251 * well as for return instructions (RET, IRET, RETF). 1252 * 1253 * Since the INTO instruction is currently the only indirect branch instruction 1254 * that is conditional (depends on the overflow flag), that instruction will 1255 * have both IEM_CIMPL_F_BRANCH_INDIR and IEM_CIMPL_F_BRANCH_COND set. All 1256 * other branch instructions will have exactly one of the branch flags set. 
*/ 1257 #define IEM_CIMPL_F_BRANCH_INDIR RT_BIT_32(3) 1258 #define IEM_CIMPL_F_MODE RT_BIT_32(5) 1258 1259 /** May change significant portions of RFLAGS. */ 1259 #define IEM_CIMPL_F_RFLAGS RT_BIT_32(4)1260 #define IEM_CIMPL_F_RFLAGS RT_BIT_32(6) 1260 1261 /** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS . */ 1261 #define IEM_CIMPL_F_STATUS_FLAGS RT_BIT_32(5)1262 #define IEM_CIMPL_F_STATUS_FLAGS RT_BIT_32(7) 1262 1263 /** May trigger a VM exit. */ 1263 #define IEM_CIMPL_F_VMEXIT RT_BIT_32(6)1264 #define IEM_CIMPL_F_VMEXIT RT_BIT_32(8) 1264 1265 /** May modify FPU state. */ 1265 #define IEM_CIMPL_F_FPU RT_BIT_32(7)1266 #define IEM_CIMPL_F_FPU RT_BIT_32(9) 1266 1267 /** REP prefixed instruction which may yield before updating PC. */ 1267 #define IEM_CIMPL_F_REP RT_BIT_32(8)1268 #define IEM_CIMPL_F_REP RT_BIT_32(10) 1268 1269 /** Force end of TB after the instruction. */ 1269 #define IEM_CIMPL_F_END_TB RT_BIT_32(9)1270 #define IEM_CIMPL_F_END_TB RT_BIT_32(11) 1270 1271 /** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */ 1271 #define IEM_CIMPL_F_XCPT (IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_UNCOND | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT) 1272 /** Convenience: Testing any kind of branch. */ 1273 #define IEM_CIMPL_F_BRANCH_ANY (IEM_CIMPL_F_BRANCH_UNCOND | IEM_CIMPL_F_BRANCH_COND | IEM_CIMPL_F_BRANCH_INDIR) 1272 #define IEM_CIMPL_F_XCPT \ 1273 (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT) 1274 1274 /** @} */ 1275 1275 -
trunk/src/VBox/VMM/include/IEMOpHlp.h
r100714 r100731 296 296 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 297 297 /** This instruction raises an \#UD in real and V8086 mode or when not using a 298 * 64-bit code segment when in long mode (applicable to all VMX instructions 299 * except VMCALL). 300 */ 301 #define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \ 298 * 64-bit code segment when in long mode (applicable to all VMX instructions 299 * except VMCALL). 300 * 301 * @todo r=bird: This is not recompiler friendly. The scenario with 302 * 16-bit/32-bit code running in long mode doesn't fit at all. 303 */ 304 # define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \ 302 305 do \ 303 306 { \ … … 318 321 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \ 319 322 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \ 320 IEMOP_RAISE_INVALID_OPCODE_R ET();\323 IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET(); /** @todo This doesn't work. */ \ 321 324 } \ 322 325 } \ … … 340 343 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \ 341 344 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \ 342 IEMOP_RAISE_INVALID_OPCODE_R ET();\345 IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET(); /** @todo This doesn't work. */ \ 343 346 } \ 344 347 } while (0)
Note:
See TracChangeset
for help on using the changeset viewer.