Changeset 108368 in vbox for trunk/src/VBox/VMM/include
- Timestamp:
- Feb 25, 2025 1:17:32 PM (2 months ago)
- svn:sync-xref-src-repo-rev:
- 167729
- Location:
- trunk/src/VBox/VMM/include
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/include/IEMInternal.h
r108298 r108368 729 729 kIemTlbTraceType_LargeScan, 730 730 kIemTlbTraceType_Flush, 731 kIemTlbTraceType_FlushGlobal, 731 kIemTlbTraceType_FlushGlobal, /**< x86 specific */ 732 732 kIemTlbTraceType_Load, 733 kIemTlbTraceType_LoadGlobal, 734 kIemTlbTraceType_Load_Cr0, /**< x86 specific */735 kIemTlbTraceType_Load_Cr3, /**< x86 specific */736 kIemTlbTraceType_Load_Cr4, /**< x86 specific */737 kIemTlbTraceType_Load_Efer, /**< x86 specific */733 kIemTlbTraceType_LoadGlobal, /**< x86 specific */ 734 kIemTlbTraceType_Load_Cr0, /**< x86 specific */ 735 kIemTlbTraceType_Load_Cr3, /**< x86 specific */ 736 kIemTlbTraceType_Load_Cr4, /**< x86 specific */ 737 kIemTlbTraceType_Load_Efer, /**< x86 specific */ 738 738 kIemTlbTraceType_Irq, 739 739 kIemTlbTraceType_Xcpt, 740 kIemTlbTraceType_IRet, /**< x86 specific */740 kIemTlbTraceType_IRet, /**< x86 specific */ 741 741 kIemTlbTraceType_Tb_Compile, 742 742 kIemTlbTraceType_Tb_Exec_Threaded, … … 995 995 * 996 996 * @{ */ 997 /** Mode: The block target mode mask. */ 998 #define IEM_F_MODE_MASK UINT32_C(0x0000001f) 999 /** Mode: The IEMMODE part of the IEMTB_F_MODE_MASK value. */ 1000 #define IEM_F_MODE_CPUMODE_MASK UINT32_C(0x00000003) 997 /** Mode: The block target mode mask. 998 * X86: CPUMODE plus protected, v86 and pre-386 indicators. 999 * ARM: PSTATE.nRW | PSTATE.T | PSTATE.EL. This doesn't quite overlap with 1000 * SPSR_ELx when in AARCH32 mode, but that's life. */ 1001 #if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING) 1002 # define IEM_F_MODE_MASK UINT32_C(0x0000001f) 1003 #elif defined(VBOX_VMM_TARGET_ARMV8) 1004 # define IEM_F_MODE_MASK UINT32_C(0x0000003c) 1005 #endif 1006 1007 #if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING) 1008 /** X86 Mode: The IEMMODE part of the IEMTB_F_MODE_MASK value. 
*/ 1009 # define IEM_F_MODE_X86_CPUMODE_MASK UINT32_C(0x00000003) 1001 1010 /** X86 Mode: Bit used to indicating pre-386 CPU in 16-bit mode (for eliminating 1002 1011 * conditional in EIP/IP updating), and flat wide open CS, SS, DS, and ES in 1003 1012 * 32-bit mode (for simplifying most memory accesses). */ 1004 # define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004)1013 # define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004) 1005 1014 /** X86 Mode: Bit indicating protected mode, real mode (or SMM) when not set. */ 1006 # define IEM_F_MODE_X86_PROT_MASKUINT32_C(0x00000008)1015 # define IEM_F_MODE_X86_PROT_MASK UINT32_C(0x00000008) 1007 1016 /** X86 Mode: Bit used to indicate virtual 8086 mode (only 16-bit). */ 1008 # define IEM_F_MODE_X86_V86_MASKUINT32_C(0x00000010)1017 # define IEM_F_MODE_X86_V86_MASK UINT32_C(0x00000010) 1009 1018 1010 1019 /** X86 Mode: 16-bit on 386 or later. */ 1011 # define IEM_F_MODE_X86_16BITUINT32_C(0x00000000)1020 # define IEM_F_MODE_X86_16BIT UINT32_C(0x00000000) 1012 1021 /** X86 Mode: 80286, 80186 and 8086/88 targetting blocks (EIP update opt). */ 1013 # define IEM_F_MODE_X86_16BIT_PRE_386UINT32_C(0x00000004)1022 # define IEM_F_MODE_X86_16BIT_PRE_386 UINT32_C(0x00000004) 1014 1023 /** X86 Mode: 16-bit protected mode on 386 or later. */ 1015 # define IEM_F_MODE_X86_16BIT_PROTUINT32_C(0x00000008)1024 # define IEM_F_MODE_X86_16BIT_PROT UINT32_C(0x00000008) 1016 1025 /** X86 Mode: 16-bit protected mode on 386 or later. */ 1017 # define IEM_F_MODE_X86_16BIT_PROT_PRE_386UINT32_C(0x0000000c)1026 # define IEM_F_MODE_X86_16BIT_PROT_PRE_386 UINT32_C(0x0000000c) 1018 1027 /** X86 Mode: 16-bit virtual 8086 protected mode (on 386 or later). */ 1019 # define IEM_F_MODE_X86_16BIT_PROT_V86UINT32_C(0x00000018)1028 # define IEM_F_MODE_X86_16BIT_PROT_V86 UINT32_C(0x00000018) 1020 1029 1021 1030 /** X86 Mode: 32-bit on 386 or later. 
*/ 1022 # define IEM_F_MODE_X86_32BITUINT32_C(0x00000001)1031 # define IEM_F_MODE_X86_32BIT UINT32_C(0x00000001) 1023 1032 /** X86 Mode: 32-bit mode with wide open flat CS, SS, DS and ES. */ 1024 # define IEM_F_MODE_X86_32BIT_FLATUINT32_C(0x00000005)1033 # define IEM_F_MODE_X86_32BIT_FLAT UINT32_C(0x00000005) 1025 1034 /** X86 Mode: 32-bit protected mode. */ 1026 # define IEM_F_MODE_X86_32BIT_PROTUINT32_C(0x00000009)1035 # define IEM_F_MODE_X86_32BIT_PROT UINT32_C(0x00000009) 1027 1036 /** X86 Mode: 32-bit protected mode with wide open flat CS, SS, DS and ES. */ 1028 # define IEM_F_MODE_X86_32BIT_PROT_FLATUINT32_C(0x0000000d)1037 # define IEM_F_MODE_X86_32BIT_PROT_FLAT UINT32_C(0x0000000d) 1029 1038 1030 1039 /** X86 Mode: 64-bit (includes protected, but not the flat bit). */ 1031 # define IEM_F_MODE_X86_64BITUINT32_C(0x0000000a)1040 # define IEM_F_MODE_X86_64BIT UINT32_C(0x0000000a) 1032 1041 1033 1042 /** X86 Mode: Checks if @a a_fExec represent a FLAT mode. */ 1034 # define IEM_F_MODE_X86_IS_FLAT(a_fExec)( ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT \1043 # define IEM_F_MODE_X86_IS_FLAT(a_fExec) ( ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT \ 1035 1044 || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT \ 1036 1045 || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT) 1046 1047 /** X86: The current protection level (CPL) shift factor. */ 1048 # define IEM_F_X86_CPL_SHIFT 8 1049 /** X86: The current protection level (CPL) mask. */ 1050 # define IEM_F_X86_CPL_MASK UINT32_C(0x00000300) 1051 /** X86: The current protection level (CPL) shifted mask. */ 1052 # define IEM_F_X86_CPL_SMASK UINT32_C(0x00000003) 1053 1054 /** X86: Alignment checks enabled (CR0.AM=1 & EFLAGS.AC=1). */ 1055 # define IEM_F_X86_AC UINT32_C(0x00080000) 1056 1057 /** X86 execution context. 1058 * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with 1059 * the exception of IEM_F_X86_CTX_NORMAL). 
This allows running VMs from SMM 1060 * mode. */ 1061 # define IEM_F_X86_CTX_MASK UINT32_C(0x0000f000) 1062 /** X86 context: Plain regular execution context. */ 1063 # define IEM_F_X86_CTX_NORMAL UINT32_C(0x00000000) 1064 /** X86 context: VT-x enabled. */ 1065 # define IEM_F_X86_CTX_VMX UINT32_C(0x00001000) 1066 /** X86 context: AMD-V enabled. */ 1067 # define IEM_F_X86_CTX_SVM UINT32_C(0x00002000) 1068 /** X86 context: In AMD-V or VT-x guest mode. */ 1069 # define IEM_F_X86_CTX_IN_GUEST UINT32_C(0x00004000) 1070 /** X86 context: System management mode (SMM). */ 1071 # define IEM_F_X86_CTX_SMM UINT32_C(0x00008000) 1072 1073 /** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in 1074 * iemRegFinishClearingRF() most for most situations (CPUMCTX_DBG_HIT_DRX_MASK 1075 * and CPUMCTX_DBG_DBGF_MASK are covered by the IEM_F_PENDING_BRK_XXX bits 1076 * alread). */ 1077 1078 /** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in 1079 * iemRegFinishClearingRF() most for most situations 1080 * (CPUMCTX_DBG_HIT_DRX_MASK and CPUMCTX_DBG_DBGF_MASK are covered by 1081 * the IEM_F_PENDING_BRK_XXX bits alread). */ 1082 1083 #endif /* X86 || doxygen */ 1084 1085 #if defined(VBOX_VMM_TARGET_ARMV8) || defined(DOXYGEN_RUNNING) 1086 /** ARM Mode: Exception (privilege) level shift count. */ 1087 # define IEM_F_MODE_ARM_EL_SHIFT 2 1088 /** ARM Mode: Exception (privilege) level mask. */ 1089 # define IEM_F_MODE_ARM_EL_MASK UINT32_C(0x0000000c) 1090 /** ARM Mode: Exception (privilege) level shifted down mask. */ 1091 # define IEM_F_MODE_ARM_EL_SMASK UINT32_C(0x00000003) 1092 /** ARM Mode: 32-bit (set) or 64-bit (clear) indicator (SPSR_ELx.M[4]). */ 1093 # define IEM_F_MODE_ARM_32BIT UINT32_C(0x00000010) 1094 /** ARM Mode: Thumb mode indicator (SPSR_ELx.T). */ 1095 # define IEM_F_MODE_ARM_T32 UINT32_C(0x00000020) 1096 1097 /** ARM Mode: Get the exception (privilege) level. 
*/ 1098 # define IEM_F_MODE_ARM_GET_EL(a_fExec) (((a_fExec) >> IEM_F_MODE_ARM_EL_SHIFT) & IEM_F_MODE_ARM_EL_SMASK) 1099 #endif /* ARM || doxygen */ 1037 1100 1038 1101 /** Bypass access handlers when set. */ … … 1049 1112 1050 1113 /** Pending breakpoint mask (what iemCalcExecDbgFlags works out). */ 1051 #define IEM_F_PENDING_BRK_MASK (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO) 1114 #if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING) 1115 # define IEM_F_PENDING_BRK_MASK (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO) 1116 #else 1117 # define IEM_F_PENDING_BRK_MASK (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA) 1118 #endif 1052 1119 1053 1120 /** Caller configurable options. */ 1054 #define IEM_F_USER_OPTS (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK) 1055 1056 /** X86: The current protection level (CPL) shift factor. */ 1057 #define IEM_F_X86_CPL_SHIFT 8 1058 /** X86: The current protection level (CPL) mask. */ 1059 #define IEM_F_X86_CPL_MASK UINT32_C(0x00000300) 1060 /** X86: The current protection level (CPL) shifted mask. */ 1061 #define IEM_F_X86_CPL_SMASK UINT32_C(0x00000003) 1062 1063 /** X86: Alignment checks enabled (CR0.AM=1 & EFLAGS.AC=1). */ 1064 #define IEM_F_X86_AC UINT32_C(0x00080000) 1065 1066 /** X86 execution context. 1067 * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with 1068 * the exception of IEM_F_X86_CTX_NORMAL). This allows running VMs from SMM 1069 * mode. */ 1070 #define IEM_F_X86_CTX_MASK UINT32_C(0x0000f000) 1071 /** X86 context: Plain regular execution context. */ 1072 #define IEM_F_X86_CTX_NORMAL UINT32_C(0x00000000) 1073 /** X86 context: VT-x enabled. */ 1074 #define IEM_F_X86_CTX_VMX UINT32_C(0x00001000) 1075 /** X86 context: AMD-V enabled. */ 1076 #define IEM_F_X86_CTX_SVM UINT32_C(0x00002000) 1077 /** X86 context: In AMD-V or VT-x guest mode. 
*/ 1078 #define IEM_F_X86_CTX_IN_GUEST UINT32_C(0x00004000) 1079 /** X86 context: System management mode (SMM). */ 1080 #define IEM_F_X86_CTX_SMM UINT32_C(0x00008000) 1081 1082 /** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in 1083 * iemRegFinishClearingRF() most for most situations (CPUMCTX_DBG_HIT_DRX_MASK 1084 * and CPUMCTX_DBG_DBGF_MASK are covered by the IEM_F_PENDING_BRK_XXX bits 1085 * alread). */ 1086 1087 /** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in 1088 * iemRegFinishClearingRF() most for most situations 1089 * (CPUMCTX_DBG_HIT_DRX_MASK and CPUMCTX_DBG_DBGF_MASK are covered by 1090 * the IEM_F_PENDING_BRK_XXX bits alread). */ 1091 1121 #if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING) 1122 # define IEM_F_USER_OPTS (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK) 1123 #else 1124 # define IEM_F_USER_OPTS (IEM_F_BYPASS_HANDLERS) 1125 #endif 1092 1126 /** @} */ 1093 1127 … … 1112 1146 /** Set when we're starting the block in an "interrupt shadow". 1113 1147 * We don't need to distingish between the two types of this mask, thus the one. 1114 * @see CPUMCTX_INHIBIT_SHADOW, CPUMIsInInterruptShadow() */ 1148 * @see CPUMCTX_INHIBIT_SHADOW, CPUMIsInInterruptShadow() 1149 * @note x86 specific */ 1115 1150 #define IEMTB_F_INHIBIT_SHADOW UINT32_C(0x04000000) 1116 1151 /** Set when we're currently inhibiting NMIs 1117 * @see CPUMCTX_INHIBIT_NMI, CPUMAreInterruptsInhibitedByNmi() */ 1152 * @see CPUMCTX_INHIBIT_NMI, CPUMAreInterruptsInhibitedByNmi() 1153 * @note x86 specific */ 1118 1154 #define IEMTB_F_INHIBIT_NMI UINT32_C(0x08000000) 1119 1155 … … 1136 1172 * have any real effect on TB/memory/recompiling load. 
1137 1173 */ 1138 #define IEMTB_F_KEY_MASK ((UINT32_MAX & ~(IEM_F_X86_CTX_MASK | IEMTB_F_TYPE_MASK)) | IEM_F_X86_CTX_SMM) 1174 #if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING) 1175 # define IEMTB_F_KEY_MASK ((UINT32_MAX & ~(IEM_F_X86_CTX_MASK | IEMTB_F_TYPE_MASK)) | IEM_F_X86_CTX_SMM) 1176 #else 1177 # define IEMTB_F_KEY_MASK (UINT32_MAX) 1178 #endif 1139 1179 /** @} */ 1140 1180 1141 AssertCompile( (IEM_F_MODE_X86_16BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT); 1181 #ifdef VBOX_VMM_TARGET_X86 1182 AssertCompile( (IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_16BIT); 1142 1183 AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)); 1143 1184 AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_PROT_MASK)); 1144 1185 AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_V86_MASK)); 1145 AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_16BIT);1186 AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_16BIT); 1146 1187 AssertCompile( IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK); 1147 1188 AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_PROT_MASK)); 1148 1189 AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_V86_MASK)); 1149 AssertCompile( (IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_16BIT);1190 AssertCompile( (IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_16BIT); 1150 1191 AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)); 1151 1192 AssertCompile( IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_PROT_MASK); 1152 1193 AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_V86_MASK)); 1153 AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_16BIT);1194 AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_16BIT); 1154 1195 AssertCompile( 
IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK); 1155 1196 AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_PROT_MASK); … … 1159 1200 AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_V86_MASK); 1160 1201 1161 AssertCompile( (IEM_F_MODE_X86_32BIT & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_32BIT);1202 AssertCompile( (IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_32BIT); 1162 1203 AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)); 1163 1204 AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_PROT_MASK)); 1164 AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_32BIT);1205 AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_32BIT); 1165 1206 AssertCompile( IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK); 1166 1207 AssertCompile(!(IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_PROT_MASK)); 1167 AssertCompile( (IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_32BIT);1208 AssertCompile( (IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_32BIT); 1168 1209 AssertCompile(!(IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)); 1169 1210 AssertCompile( IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_PROT_MASK); 1170 AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_32BIT);1211 AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_32BIT); 1171 1212 AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK); 1172 1213 AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_PROT_MASK); 1173 1214 1174 AssertCompile( (IEM_F_MODE_X86_64BIT & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_64BIT);1215 AssertCompile( (IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_64BIT); 1175 1216 AssertCompile( IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_PROT_MASK); 1176 1217 
AssertCompile(!(IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)); 1218 #endif /* VBOX_VMM_TARGET_X86 */ 1219 1220 #ifdef VBOX_VMM_TARGET_ARMV8 1221 AssertCompile(IEM_F_MODE_ARM_EL_SHIFT == ARMV8_SPSR_EL2_AARCH64_EL_SHIFT); 1222 AssertCompile(IEM_F_MODE_ARM_EL_MASK == ARMV8_SPSR_EL2_AARCH64_EL); 1223 AssertCompile(IEM_F_MODE_ARM_32BIT == ARMV8_SPSR_EL2_AARCH64_M4); 1224 AssertCompile(IEM_F_MODE_ARM_T32 == ARMV8_SPSR_EL2_AARCH64_T); 1225 #endif 1177 1226 1178 1227 /** Native instruction type for use with the native code generator. … … 1221 1270 uint8_t fFlags; 1222 1271 1223 /** Generic parameters. */ 1272 /** Generic parameters. 1273 * @todo ARM: Hope we can get away with one param here... */ 1224 1274 uint64_t auParams[3]; 1225 1275 } IEMTHRDEDCALLENTRY; -
trunk/src/VBox/VMM/include/IEMMc.h
r108313 r108368 1091 1091 * Helper macro for check that all important IEM_CIMPL_F_XXX bits are set. 1092 1092 */ 1093 #if def VBOX_STRICT1093 #if defined(VBOX_STRICT) && defined(VBOX_VMM_TARGET_X86) 1094 1094 # define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) \ 1095 1095 do { \ … … 1102 1102 if (rcStrictHlp == VINF_SUCCESS) \ 1103 1103 { \ 1104 uint64_t const fRipMask = (pVCpu->iem.s.fExec & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_64BIT ? UINT64_MAX : UINT32_MAX; \1104 uint64_t const fRipMask = (pVCpu->iem.s.fExec & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_64BIT ? UINT64_MAX : UINT32_MAX; \ 1105 1105 AssertMsg( ((a_fFlags) & IEM_CIMPL_F_BRANCH_ANY) \ 1106 1106 || ( ((uRipBefore + cbInstr) & fRipMask) == pVCpu->cpum.GstCtx.rip \ … … 1127 1127 /* in case ES, DS or SS was external initially (happens alot with HM): */ \ 1128 1128 || ( fExecBefore == (fExecRecalc & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK) \ 1129 && (fExecRecalc & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_32BIT), \1129 && (fExecRecalc & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_32BIT), \ 1130 1130 ("fExec=%#x -> %#x (diff %#x)\n", fExecBefore, fExecRecalc, fExecBefore ^ fExecRecalc)); \ 1131 1131 } \ -
trunk/src/VBox/VMM/include/IEMN8veRecompilerTlbLookup.h
r107199 r108368 100 100 : fSkip( a_pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVarGCPtrMem)].enmKind 101 101 == kIemNativeVarKind_Immediate 102 && ( (a_pReNative->fExec & IEM_F_MODE_ CPUMODE_MASK) != IEMMODE_64BIT102 && ( (a_pReNative->fExec & IEM_F_MODE_X86_CPUMODE_MASK) != IEMMODE_64BIT 103 103 ? (uint64_t)(UINT32_MAX - a_cbMem - a_offDisp) 104 104 : (uint64_t)(UINT64_MAX - a_cbMem - a_offDisp)) … … 126 126 ? UINT8_MAX 127 127 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_BASE(a_iSegReg))) 128 , idxRegSegLimit(a_fFlat || a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_64BIT || fSkip128 , idxRegSegLimit(a_fFlat || a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_64BIT || fSkip 129 129 ? UINT8_MAX 130 130 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_LIMIT(a_iSegReg))) 131 , idxRegSegAttrib(a_fFlat || a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_64BIT || fSkip131 , idxRegSegAttrib(a_fFlat || a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_64BIT || fSkip 132 132 ? UINT8_MAX 133 133 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_ATTRIB(a_iSegReg))) … … 166 166 ? UINT8_MAX 167 167 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_BASE(a_iSegReg))) 168 , idxRegSegLimit((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_64BIT) || fSkip168 , idxRegSegLimit((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_64BIT) || fSkip 169 169 ? 
UINT8_MAX 170 170 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_LIMIT(a_iSegReg))) 171 , idxRegSegAttrib((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_ CPUMODE_MASK) == IEMMODE_64BIT) || fSkip171 , idxRegSegAttrib((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_64BIT) || fSkip 172 172 ? UINT8_MAX 173 173 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_ATTRIB(a_iSegReg))) … … 349 349 uint32_t const offCheckExpandDown = off; 350 350 uint32_t offFixupLimitDone = 0; 351 if (a_fDataTlb && iSegReg != UINT8_MAX && (pReNative->fExec & IEM_F_MODE_ CPUMODE_MASK) != IEMMODE_64BIT)351 if (a_fDataTlb && iSegReg != UINT8_MAX && (pReNative->fExec & IEM_F_MODE_X86_CPUMODE_MASK) != IEMMODE_64BIT) 352 352 { 353 353 /* cmp seglim, regptr */ … … 539 539 * doing TLB loading. 540 540 */ 541 if (a_fDataTlb && iSegReg != UINT8_MAX && (pReNative->fExec & IEM_F_MODE_ CPUMODE_MASK) != IEMMODE_64BIT)541 if (a_fDataTlb && iSegReg != UINT8_MAX && (pReNative->fExec & IEM_F_MODE_X86_CPUMODE_MASK) != IEMMODE_64BIT) 542 542 { 543 543 /* Check that we've got a segment loaded and that it allows the access.
Note: See TracChangeset for help on using the changeset viewer.