Changeset 100072 in vbox for trunk/src/VBox/VMM/include
Timestamp: Jun 5, 2023 3:17:42 PM (20 months ago)
Location:  trunk/src/VBox/VMM/include
Files:     4 edited
trunk/src/VBox/VMM/include/IEMInternal-armv8.h
r99739 → r100072

…
  * @return  Strict VBox status code.
  */
-#define IEMOP_RAISE_DIVIDE_ERROR()       IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
+#define IEMOP_RAISE_DIVIDE_ERROR_RET()   IEM_MC_DEFER_TO_CIMPL_0_RET(iemCImplRaiseDivideError)

 /**
…
  * @return  Strict VBox status code.
  */
-#define IEMOP_RAISE_INVALID_OPCODE()     IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
+#define IEMOP_RAISE_INVALID_OPCODE_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(iemCImplRaiseInvalidOpcode)
 /** @} */
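Both renames follow the same convention: the trailing _RET marks macros that perform the return themselves, so call sites no longer prefix them with return. A minimal caller-side sketch (the handler name below is invented for illustration, not part of this changeset):

    /* Hypothetical opcode handler showing the call-site convention change. */
    FNIEMOP_DEF(iemOp_ExampleInvalid)    /* hypothetical handler name */
    {
        /* r99739:  return IEMOP_RAISE_INVALID_OPCODE();           */
        /* r100072: the _RET macro contains the return statement:  */
        IEMOP_RAISE_INVALID_OPCODE_RET();
    }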
trunk/src/VBox/VMM/include/IEMInternal.h
r100061 → r100072

…
  * @return  Strict VBox status code.
  */
-#define IEMOP_RAISE_DIVIDE_ERROR()            IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
+#define IEMOP_RAISE_DIVIDE_ERROR_RET()        IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseDivideError)

 /**
…
  * @return  Strict VBox status code.
  */
-#define IEMOP_RAISE_INVALID_LOCK_PREFIX()     IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
+#define IEMOP_RAISE_INVALID_LOCK_PREFIX_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidLockPrefix)

 /**
…
  * @return  Strict VBox status code.
  */
-#define IEMOP_RAISE_INVALID_OPCODE()          IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
+#define IEMOP_RAISE_INVALID_OPCODE_RET()      IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode)
 /** @} */
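Unlike the ARMv8 variant above, the x86 versions also pass IEM_CIMPL_F_XCPT, the convenience flag mask added to IEMMc.h below. A small compile-time sanity sketch of its composition, using IPRT's AssertCompile (illustrative, not part of the changeset):

    #include <iprt/assert.h>

    /* IEM_CIMPL_F_XCPT is simply the OR of the state-change hints that
     * raising an exception implies, per the definition in IEMMc.h. */
    AssertCompile(   IEM_CIMPL_F_XCPT
                  == (IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT));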
trunk/src/VBox/VMM/include/IEMMc.h
r100052 → r100072

 #define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))

+/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
+ *
+ * These clues are mainly for the recompiler, so that it can
+ *
+ * @{ */
+#define IEM_CIMPL_F_MODE            RT_BIT_32(0) /**< Execution flags may change (IEMCPU::fExec). */
+#define IEM_CIMPL_F_BRANCH          RT_BIT_32(1) /**< Branches (changes RIP, maybe CS). */
+#define IEM_CIMPL_F_RFLAGS          RT_BIT_32(2) /**< May change significant portions of RFLAGS. */
+#define IEM_CIMPL_F_STATUS_FLAGS    RT_BIT_32(3) /**< May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
+#define IEM_CIMPL_F_VMEXIT          RT_BIT_32(4) /**< May trigger a VM exit. */
+#define IEM_CIMPL_F_FPU             RT_BIT_32(5) /**< May modify FPU state. */
+#define IEM_CIMPL_F_REP             RT_BIT_32(6) /**< REP prefixed instruction which may yield before updating PC. */
+#define IEM_CIMPL_F_END_TB          RT_BIT_32(7)
+/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
+#define IEM_CIMPL_F_XCPT            (IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
+/** @} */
+
+/** @def IEM_MC_CALL_CIMPL_HLP_RET
+ * Helper macro for check that all important IEM_CIMPL_F_XXX bits are set.
+ */
+#ifdef VBOX_STRICT
+# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) \
+    do { \
+        uint8_t const  cbInstr     = IEM_GET_INSTR_LEN(pVCpu); /* may be flushed */ \
+        uint16_t const uCsBefore   = pVCpu->cpum.GstCtx.cs.Sel; \
+        uint64_t const uRipBefore  = pVCpu->cpum.GstCtx.rip; \
+        uint32_t const fEflBefore  = pVCpu->cpum.GstCtx.eflags.u; \
+        uint32_t const fExecBefore = pVCpu->iem.s.fExec; \
+        VBOXSTRICTRC const rcStrictHlp = a_CallExpr; \
+        if (rcStrictHlp == VINF_SUCCESS) \
+        { \
+            AssertMsg(   ((a_fFlags) & IEM_CIMPL_F_BRANCH) \
+                      || (   uRipBefore + cbInstr == pVCpu->cpum.GstCtx.rip \
+                          && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel) \
+                      || (   ((a_fFlags) & IEM_CIMPL_F_REP) \
+                          && uRipBefore == pVCpu->cpum.GstCtx.rip \
+                          && uCsBefore == pVCpu->cpum.GstCtx.cs.Sel), \
+                      ("CS:RIP=%04x:%08RX64 + %x -> %04x:%08RX64, expected %04x:%08RX64\n", uCsBefore, uRipBefore, cbInstr, \
+                       pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uCsBefore, uRipBefore + cbInstr)); \
+            if ((a_fFlags) & IEM_CIMPL_F_RFLAGS) \
+            { /* No need to check fEflBefore */ Assert(!((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS)); } \
+            else if ((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS) \
+                AssertMsg(   (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)) \
+                          == (fEflBefore & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)), \
+                          ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
+            else \
+                AssertMsg(   (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_RF)) \
+                          == (fEflBefore & ~(X86_EFL_RF)), \
+                          ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
+            if (!((a_fFlags) & IEM_CIMPL_F_MODE)) \
+            { \
+                uint32_t fExecRecalc = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS); \
+                AssertMsg(fExecBefore == fExecRecalc, \
+                          ("fExec=%#x -> %#x (diff %#x)\n", fExecBefore, fExecRecalc, fExecBefore ^ fExecRecalc)); \
+            } \
+        } \
+        return rcStrictHlp; \
+    } while (0)
+#else
+# define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) return a_CallExpr
+#endif
+
 /**
  * Defers the rest of the instruction emulation to a C implementation routine
  * and returns, only taking the standard parameters.
  *
+ * @param   a_fFlags    IEM_CIMPL_F_XXX.
  * @param   a_pfnCImpl  The pointer to the C routine.
  * @sa      IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
  */
-#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
+#define IEM_MC_CALL_CIMPL_0(a_fFlags, a_pfnCImpl) \
+    IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))

 /**
…
  * returns, taking one argument in addition to the standard ones.
  *
+ * @param   a_fFlags    IEM_CIMPL_F_XXX.
  * @param   a_pfnCImpl  The pointer to the C routine.
  * @param   a0          The argument.
  */
-#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
+#define IEM_MC_CALL_CIMPL_1(a_fFlags, a_pfnCImpl, a0) \
+    IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))

 /**
…
  * and returns, taking two arguments in addition to the standard ones.
  *
+ * @param   a_fFlags    IEM_CIMPL_F_XXX.
  * @param   a_pfnCImpl  The pointer to the C routine.
  * @param   a0          The first extra argument.
  * @param   a1          The second extra argument.
  */
-#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
+#define IEM_MC_CALL_CIMPL_2(a_fFlags, a_pfnCImpl, a0, a1) \
+    IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))

 /**
…
  * and returns, taking three arguments in addition to the standard ones.
  *
+ * @param   a_fFlags    IEM_CIMPL_F_XXX.
  * @param   a_pfnCImpl  The pointer to the C routine.
  * @param   a0          The first extra argument.
…
  * @param   a2          The third extra argument.
  */
-#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
+#define IEM_MC_CALL_CIMPL_3(a_fFlags, a_pfnCImpl, a0, a1, a2) \
+    IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))

 /**
…
  * and returns, taking four arguments in addition to the standard ones.
  *
+ * @param   a_fFlags    IEM_CIMPL_F_XXX.
  * @param   a_pfnCImpl  The pointer to the C routine.
  * @param   a0          The first extra argument.
…
  * @param   a3          The fourth extra argument.
  */
-#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
+#define IEM_MC_CALL_CIMPL_4(a_fFlags, a_pfnCImpl, a0, a1, a2, a3) \
+    IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3))

 /**
…
  * and returns, taking two arguments in addition to the standard ones.
  *
+ * @param   a_fFlags    IEM_CIMPL_F_XXX.
  * @param   a_pfnCImpl  The pointer to the C routine.
  * @param   a0          The first extra argument.
…
  * @param   a4          The fifth extra argument.
  */
-#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
+#define IEM_MC_CALL_CIMPL_5(a_fFlags, a_pfnCImpl, a0, a1, a2, a3, a4) \
+    IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4))

 /**
…
  * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
  *
+ * @param   a_fFlags    IEM_CIMPL_F_XXX.
  * @param   a_pfnCImpl  The pointer to the C routine.
  * @sa      IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
  */
-#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
+#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_pfnCImpl) \
+    IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))

 /**
…
  * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
  *
+ * @param   a_fFlags    IEM_CIMPL_F_XXX.
  * @param   a_pfnCImpl  The pointer to the C routine.
  * @param   a0          The argument.
  */
-#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
+#define IEM_MC_DEFER_TO_CIMPL_1_RET(a_fFlags, a_pfnCImpl, a0) \
+    IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))

 /**
…
  * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
  *
+ * @param   a_fFlags    IEM_CIMPL_F_XXX.
  * @param   a_pfnCImpl  The pointer to the C routine.
  * @param   a0          The first extra argument.
  * @param   a1          The second extra argument.
  */
-#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
+#define IEM_MC_DEFER_TO_CIMPL_2_RET(a_fFlags, a_pfnCImpl, a0, a1) \
+    IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))

 /**
…
  * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
  *
+ * @param   a_fFlags    IEM_CIMPL_F_XXX.
  * @param   a_pfnCImpl  The pointer to the C routine.
  * @param   a0          The first extra argument.
…
  * @param   a2          The third extra argument.
  */
-#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
+#define IEM_MC_DEFER_TO_CIMPL_3_RET(a_fFlags, a_pfnCImpl, a0, a1, a2) \
+    IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
+

 /**
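Every IEM_MC_CALL_CIMPL_n and IEM_MC_DEFER_TO_CIMPL_n_RET call site now states up front which guest state the C implementation may change, and in strict builds the helper above verifies those claims after the call returns. A hedged sketch of an updated call site (the CIMPL routine name is invented for illustration):

    /* Hypothetical instruction body: IEM_CIMPL_F_STATUS_FLAGS tells the
     * strict-build checks (and later the recompiler) that only the RFLAGS
     * status bits may change. */
    IEM_MC_BEGIN(1, 0);
    IEM_MC_ARG_CONST(uint8_t, bImm, /*=*/ 0x42, 0);
    IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_STATUS_FLAGS, iemCImpl_SomeArithOp, bImm); /* hypothetical CIMPL */
    IEM_MC_END();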
trunk/src/VBox/VMM/include/IEMOpHlp.h
r100052 → r100072

…
 { \
     Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
-    return IEMOP_RAISE_INVALID_OPCODE(); \
+    IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } \
 typedef int ignore_semicolon
…
     RT_NOREF_PV(a_Name0); \
     Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
-    return IEMOP_RAISE_INVALID_OPCODE(); \
+    IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } \
 typedef int ignore_semicolon
…
     { \
         (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
     } \
 } while (0)
…
 do { \
     if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
-    else return IEMOP_RAISE_INVALID_OPCODE(); \
+    else IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
 #endif
…
 { \
     if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
-    else return IEMOP_RAISE_INVALID_OPCODE(); \
+    else IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
…
         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
         Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
     } \
     if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
…
         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
         Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
     } \
 } \
…
         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
         Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
     } \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
+        IEMOP_RAISE_INVALID_LOCK_PREFIX_RET(); \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
+        IEMOP_RAISE_INVALID_LOCK_PREFIX_RET(); \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
+        IEMOP_RAISE_INVALID_LOCK_PREFIX_RET(); \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
…
     { \
         NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
+        IEMOP_RAISE_INVALID_LOCK_PREFIX_RET(); \
     } \
 } while (0)
…
     { \
         NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
+        IEMOP_RAISE_INVALID_LOCK_PREFIX_RET(); \
     } \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
…
     { /* likely */ } \
     else \
-        return IEMOP_RAISE_INVALID_OPCODE(); \
+        IEMOP_RAISE_INVALID_OPCODE_RET(); \
 } while (0)
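All of the edits in this header are the same mechanical substitution: return IEMOP_RAISE_*(); becomes IEMOP_RAISE_*_RET();. A sketch of a decode-time guard in the new style (the macro name is invented; it only mirrors the guards changed above):

    /* Hypothetical guard in the post-r100072 style: no 'return' keyword at
     * the call site, since the _RET macro performs the return internally. */
    #define IEMOP_HLP_EXAMPLE_64BIT_ONLY() \
        do { \
            if (IEM_IS_64BIT_CODE(pVCpu)) { /* likely */ } \
            else IEMOP_RAISE_INVALID_OPCODE_RET(); \
        } while (0)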