Changeset 108278 in vbox
- Timestamp: Feb 18, 2025 3:46:53 PM (2 months ago)
- svn:sync-xref-src-repo-rev: 167608
- Location: trunk/src/VBox/VMM
- Files: 20 edited
Legend:
- Unmodified: leading space
- Added: leading +
- Removed: leading -
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r108260 r108278

     RT_NOREF_PV(pszFunction);

-#ifdef IEM_WITH_SETJMP
     VBOXSTRICTRC rcStrict;
     IEM_TRY_SETJMP(pVCpu, rcStrict)
 …
     }
     IEM_CATCH_LONGJMP_END(pVCpu);
-#else
-    uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
-    VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
-#endif
     if (rcStrict == VINF_SUCCESS)
         pVCpu->iem.s.cInstructions++;
 …
     iemLogCurInstr(pVCpu, false, pszFunction);
 #endif
-#ifdef IEM_WITH_SETJMP
     IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
     {
 …
     }
     IEM_CATCH_LONGJMP_END(pVCpu);
-#else
-    IEM_OPCODE_GET_FIRST_U8(&b);
-    rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
-#endif
     if (rcStrict == VINF_SUCCESS)
     {
 …
     if (rcStrict == VINF_SUCCESS)
     {
-#ifdef IEM_WITH_SETJMP
         pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
         IEM_TRY_SETJMP(pVCpu, rcStrict)
-#endif
         {
             /*
 …
             }
         }
-#ifdef IEM_WITH_SETJMP
         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
         {
             if (pVCpu->iem.s.cActiveMappings > 0)
                 iemMemRollback(pVCpu);
-# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
+#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
-# endif
+#endif
             pVCpu->iem.s.cLongJumps++;
         }
         IEM_CATCH_LONGJMP_END(pVCpu);
-#endif
 …
     if (rcStrict == VINF_SUCCESS)
     {
-#ifdef IEM_WITH_SETJMP
         pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
         IEM_TRY_SETJMP(pVCpu, rcStrict)
-#endif
         {
 #ifdef IN_RING0
 …
         }
     }
-#ifdef IEM_WITH_SETJMP
     IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
     {
 …
     }
     IEM_CATCH_LONGJMP_END(pVCpu);
-#endif

     /*
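What is left behind in each of these loops is the setjmp-style error handling; the interpreter no longer has a status-code-only fallback path. As a rough, self-contained illustration of the technique (plain C setjmp/longjmp standing in for the IEM_TRY_SETJMP / IEM_CATCH_LONGJMP_BEGIN / IEM_CATCH_LONGJMP_END macros; the helper and variable names below are made up, not VirtualBox code):

    #include <setjmp.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a guest-memory helper: instead of returning a
       status code through every frame, a failure deep in the call chain
       longjmps straight back to the dispatcher. */
    static jmp_buf g_JmpBuf;

    static void memFetchHelper(int fFail)
    {
        if (fFail)
            longjmp(g_JmpBuf, -1);      /* like the IEM helpers raising #PF etc. */
    }

    static int executeOneInstruction(int fFail)
    {
        int rc = setjmp(g_JmpBuf);      /* corresponds to IEM_TRY_SETJMP(pVCpu, rcStrict) */
        if (rc == 0)
        {
            memFetchHelper(fFail);      /* decode + execute; may longjmp */
            return 0;                   /* VINF_SUCCESS */
        }
        /* corresponds to IEM_CATCH_LONGJMP_BEGIN/END: roll back mappings,
           bump the long-jump statistics, return the raised status. */
        printf("long jump taken, rc=%d\n", rc);
        return rc;
    }

    int main(void)
    {
        printf("ok path:    %d\n", executeOneInstruction(0));
        printf("error path: %d\n", executeOneInstruction(1));
        return 0;
    }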
trunk/src/VBox/VMM/VMMAll/IEMAllMem.cpp
r108260 r108278

 }

-#ifdef IEM_WITH_SETJMP

 /**
 …
 }

-#endif /* IEM_WITH_SETJMP */

 #ifndef IN_RING3
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
r107209 r108278

 #ifndef IEM_WITH_DATA_TLB
 # error The data TLB must be enabled for the recompiler.
 #endif
-
-#ifndef IEM_WITH_SETJMP
-# error The setjmp approach must be enabled for the recompiler.
-#endif
trunk/src/VBox/VMM/VMMAll/IEMAllThrdRecompiler.cpp
r108260 r108278

 # error The data TLB must be enabled for the recompiler.
 #endif
-
-#ifndef IEM_WITH_SETJMP
-# error The setjmp approach must be enabled for the recompiler.
-#endif
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllMem-x86.cpp
r108260 r108278

 }

-#ifdef IEM_WITH_SETJMP

 /**
 …
 }

-#endif /* IEM_WITH_SETJMP */

 …

-#ifdef IEM_WITH_SETJMP
 /**
  * Stores a data dqword, SSE aligned.
 …
     Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
 }
-#endif

 …

-#ifdef IEM_WITH_SETJMP
 /**
  * Stores a data dqword, longjmp on error.
 …
     Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
 }
-#endif
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllMemRWTmpl-x86.cpp.h
r108226 r108278

  * Standard fetch function.
  *
- * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
- * is defined.
+ * This is used by CImpl code.
  */
 VBOXSTRICTRC RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puDst,
 …

 /**
  * Standard store function.
  *
- * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
- * is defined.
+ * This is used by CImpl code.
  */
 VBOXSTRICTRC RT_CONCAT(iemMemStoreData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
 …

The remaining hunks in this file drop the #ifdef IEM_WITH_SETJMP / #endif guards (and, inside the TMPL_MEM_WITH_STACK part, the # ifdef IEM_WITH_SETJMP / # endif /* IEM_WITH_SETJMP */ pair) around the Safe/fallback fetch worker, the store worker, the AT/RW/WO/RO map workers, and the stack store, SREG store and push workers, and shift the preprocessor directives nested inside them out by one level, for example:

-# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
-    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
-# endif
+#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
+    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
+#endif

and

-#  ifdef TMPL_WITH_PUSH_SREG
+# ifdef TMPL_WITH_PUSH_SREG
 …
-#  endif /* TMPL_WITH_PUSH_SREG */
+# endif /* TMPL_WITH_PUSH_SREG */

The bodies of the affected workers are otherwise unchanged; the section still closes with the untouched #endif /* TMPL_MEM_WITH_STACK */.
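IEMAllMemRWTmpl-x86.cpp.h is a size-generic template: each worker name is pasted together with RT_CONCAT/RT_CONCAT3 from the TMPL_MEM_FN_SUFF suffix and operates on TMPL_MEM_TYPE, so one source expands into the per-size fetch/store/map/stack variants. A minimal sketch of that token-pasting idea; the concatenation macro, function name and parameter values below are invented for the example and are not the values the IEM build actually uses:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Token pasting akin to IPRT's RT_CONCAT3, redefined so the sample is
       self-contained. */
    #define MY_CONCAT3_INNER(a,b,c)  a##b##c
    #define MY_CONCAT3(a,b,c)        MY_CONCAT3_INNER(a,b,c)

    /* "Template" parameters; the real header is included once per access size
       with these set by the including file. */
    #define TMPL_MEM_TYPE    uint32_t
    #define TMPL_MEM_FN_SUFF U32

    /* Expands to: uint32_t demoMemFetchDataU32Safe(const uint8_t *pb) */
    static TMPL_MEM_TYPE MY_CONCAT3(demoMemFetchData, TMPL_MEM_FN_SUFF, Safe)(const uint8_t *pb)
    {
        TMPL_MEM_TYPE uValue;
        memcpy(&uValue, pb, sizeof(uValue));   /* unaligned-safe fetch */
        return uValue;
    }

    int main(void)
    {
        uint8_t const ab[4] = { 0x78, 0x56, 0x34, 0x12 };
        /* 0x12345678 on little-endian hosts */
        printf("0x%" PRIx32 "\n", demoMemFetchDataU32Safe(ab));
        return 0;
    }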
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllMemRWTmplInline-x86.cpp.h
r108262 r108278

-#ifdef IEM_WITH_SETJMP
-
-
 /*********************************************************************************************************************************
 * Fetches *
 *********************************************************************************************************************************/

The rest of the file follows the same pattern: the matching #endif /* IEM_WITH_SETJMP */ at the bottom and the # ifdef IEM_WITH_SETJMP / # endif /* IEM_WITH_SETJMP */ pair inside the TMPL_MEM_WITH_STACK section are removed, and every preprocessor directive that was nested under those guards moves out one level, for example:

-# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
 …
-#  ifdef IEM_WITH_TLB_STATISTICS
-    pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
-#  endif
+# ifdef IEM_WITH_TLB_STATISTICS
+    pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
+# endif

The template bodies themselves are not changed: the inlined segmented and flat fetch functions, the stores (unless TMPL_MEM_NO_STORE), the RW/atomic/WO/RO mapping helpers (unless TMPL_MEM_NO_MAPPING) and the stack store/fetch/push/pop workers (under TMPL_MEM_WITH_STACK) still convert segmented to flat addresses, check alignment and page boundaries, do the data-TLB tag and access-flag checks, and access the page directly through pTlbe->pbMappingR3 on a hit, falling back on the corresponding SafeJmp worker in case of a TLB miss, MMIO, an outdated page pointer, or other troubles. The file now ends with #endif /* TMPL_MEM_WITH_STACK */ followed by the #undef TMPL_MEM_TYPE cleanup.
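The inlined workers in this header share a two-tier shape that the guard removal leaves untouched: a fast path that checks alignment, looks the address up in the software data TLB and accesses the page straight through pTlbe->pbMappingR3, and a SafeJmp fallback for TLB misses, MMIO and other awkward cases. A heavily simplified, self-contained sketch of that shape; the TLB size, tag scheme and names below are illustrative only and much smaller than IEM's real data TLB:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SHIFT  12
    #define TLB_ENTRIES 64

    typedef struct TLBENTRY
    {
        uint64_t uTag;        /* page tag | revision */
        uint8_t *pbMapping;   /* host mapping of the guest page, NULL if none */
    } TLBENTRY;

    static TLBENTRY g_aTlb[TLB_ENTRIES];
    static uint64_t g_uTlbRevision = 1;

    static uint32_t fetchU32Safe(uint64_t GCPtr);   /* slow path: walk tables, load TLB, ... */

    static uint32_t fetchU32(uint64_t GCPtr)
    {
        uint64_t const  uTag  = (GCPtr >> PAGE_SHIFT) | (g_uTlbRevision << 48);
        TLBENTRY const *pTlbe = &g_aTlb[(GCPtr >> PAGE_SHIFT) % TLB_ENTRIES];
        if (pTlbe->uTag == uTag && pTlbe->pbMapping)             /* TLB hit */
        {
            uint32_t uValue;
            memcpy(&uValue, &pTlbe->pbMapping[GCPtr & ((1u << PAGE_SHIFT) - 1)], sizeof(uValue));
            return uValue;
        }
        return fetchU32Safe(GCPtr);                              /* TLB miss, MMIO, etc. */
    }

    static uint32_t fetchU32Safe(uint64_t GCPtr)
    {
        (void)GCPtr;
        return 0;   /* placeholder for the careful, non-inlined path */
    }

    int main(void)
    {
        static uint8_t s_abPage[1 << PAGE_SHIFT] = { 0xef, 0xbe, 0xad, 0xde };
        g_aTlb[0].uTag      = 0 | (g_uTlbRevision << 48);
        g_aTlb[0].pbMapping = s_abPage;
        printf("0x%" PRIx32 "\n", fetchU32(0));   /* 0xdeadbeef via the fast path */
        return 0;
    }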
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veRecompFuncs-x86.h
r108269 r108278

 #ifndef IEM_WITH_DATA_TLB
 # error The data TLB must be enabled for the recompiler.
 #endif
-
-#ifndef IEM_WITH_SETJMP
-# error The setjmp approach must be enabled for the recompiler.
-#endif
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllOpHlp-x86.cpp
r108260 r108278

  * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
  *
- * @return  Strict VBox status code.
- * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
- * @param   bRm                 The ModRM byte.
- * @param   cbImmAndRspOffset   - First byte: The size of any immediate
- *                                following the effective address opcode bytes
- *                                (only for RIP relative addressing).
- *                              - Second byte: RSP displacement (for POP [ESP]).
- * @param   pGCPtrEff           Where to return the effective address.
- */
-VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
-{
-    Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
-    …
-    Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
-    return VINF_SUCCESS;
-}
-
-
-#ifdef IEM_WITH_SETJMP
-/**
- * Calculates the effective address of a ModR/M memory operand.
- *
- * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
- *
  * May longjmp on internal error.

The elided body was the complete strict-status-code implementation of the helper: the SET_SS_DEF() macro, the 16-bit path (the disp16-only form plus the BX+SI/BX+DI/BP+SI/BP+DI/SI/DI/BP/BX combinations with disp8/disp16), the 32-bit path (register and SIB forms with scaled index, base, ESP offsetting via cbImmAndRspOffset, and disp8/disp32), and the 64-bit path (RIP-relative disp32, REX-extended register and SIB decoding with the RBP/R13 special cases), ending with the truncation to 32 bits when the effective address size is not 64-bit. Only the longjmp variant of the helper remains after this change.
359 62 * … … 369 72 { 370 73 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm)); 371 # 74 #define SET_SS_DEF() \ 372 75 do \ 373 76 { \ … … 642 345 return u64EffAddr & UINT32_MAX; 643 346 } 644 #endif /* IEM_WITH_SETJMP */645 646 647 /**648 * Calculates the effective address of a ModR/M memory operand, extended version649 * for use in the recompilers.650 *651 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.652 *653 * @return Strict VBox status code.654 * @param pVCpu The cross context virtual CPU structure of the calling thread.655 * @param bRm The ModRM byte.656 * @param cbImmAndRspOffset - First byte: The size of any immediate657 * following the effective address opcode bytes658 * (only for RIP relative addressing).659 * - Second byte: RSP displacement (for POP [ESP]).660 * @param pGCPtrEff Where to return the effective address.661 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and662 * SIB byte (bits 39:32).663 */664 VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT665 {666 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));667 # define SET_SS_DEF() \668 do \669 { \670 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \671 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \672 } while (0)673 674 uint64_t uInfo;675 if (!IEM_IS_64BIT_CODE(pVCpu))676 {677 /** @todo Check the effective address size crap! */678 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)679 {680 uint16_t u16EffAddr;681 682 /* Handle the disp16 form with no registers first. */683 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)684 {685 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);686 uInfo = u16EffAddr;687 }688 else689 {690 /* Get the displacment. */691 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)692 {693 case 0: u16EffAddr = 0; break;694 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;695 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;696 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */697 }698 uInfo = u16EffAddr;699 700 /* Add the base and index registers to the disp. */701 switch (bRm & X86_MODRM_RM_MASK)702 {703 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;704 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;705 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;706 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;707 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;708 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;709 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;710 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;711 }712 }713 714 *pGCPtrEff = u16EffAddr;715 }716 else717 {718 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);719 uint32_t u32EffAddr;720 721 /* Handle the disp32 form with no registers first. */722 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)723 {724 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);725 uInfo = u32EffAddr;726 }727 else728 {729 /* Get the register (or SIB) value. 
*/730 uInfo = 0;731 switch ((bRm & X86_MODRM_RM_MASK))732 {733 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;734 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;735 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;736 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;737 case 4: /* SIB */738 {739 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);740 uInfo = (uint64_t)bSib << 32;741 742 /* Get the index and scale it. */743 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)744 {745 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;746 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;747 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;748 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;749 case 4: u32EffAddr = 0; /*none */ break;750 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;751 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;752 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;753 IEM_NOT_REACHED_DEFAULT_CASE_RET();754 }755 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;756 757 /* add base */758 switch (bSib & X86_SIB_BASE_MASK)759 {760 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;761 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;762 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;763 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;764 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;765 case 5:766 if ((bRm & X86_MODRM_MOD_MASK) != 0)767 {768 u32EffAddr += pVCpu->cpum.GstCtx.ebp;769 SET_SS_DEF();770 }771 else772 {773 uint32_t u32Disp;774 IEM_OPCODE_GET_NEXT_U32(&u32Disp);775 u32EffAddr += u32Disp;776 uInfo |= u32Disp;777 }778 break;779 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;780 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;781 IEM_NOT_REACHED_DEFAULT_CASE_RET();782 }783 break;784 }785 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;786 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;787 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;788 IEM_NOT_REACHED_DEFAULT_CASE_RET();789 }790 791 /* Get and add the displacement. */792 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)793 {794 case 0:795 break;796 case 1:797 {798 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);799 u32EffAddr += i8Disp;800 uInfo |= (uint32_t)(int32_t)i8Disp;801 break;802 }803 case 2:804 {805 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);806 u32EffAddr += u32Disp;807 uInfo |= (uint32_t)u32Disp;808 break;809 }810 default:811 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */812 }813 814 }815 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);816 *pGCPtrEff = u32EffAddr;817 }818 }819 else820 {821 uint64_t u64EffAddr;822 823 /* Handle the rip+disp32 form with no registers first. */824 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)825 {826 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);827 uInfo = (uint32_t)u64EffAddr;828 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));829 }830 else831 {832 /* Get the register (or SIB) value. 
*/833 uInfo = 0;834 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)835 {836 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;837 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;838 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;839 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;840 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;841 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;842 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;843 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;844 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;845 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;846 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;847 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;848 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;849 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;850 /* SIB */851 case 4:852 case 12:853 {854 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);855 uInfo = (uint64_t)bSib << 32;856 857 /* Get the index and scale it. */858 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)859 {860 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;861 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;862 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;863 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;864 case 4: u64EffAddr = 0; /*none */ break;865 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;866 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;867 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;868 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;869 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;870 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;871 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;872 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;873 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;874 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;875 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;876 IEM_NOT_REACHED_DEFAULT_CASE_RET();877 }878 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;879 880 /* add base */881 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)882 {883 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;884 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;885 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;886 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;887 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;888 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;889 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;890 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;891 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;892 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;893 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;894 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;895 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;896 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;897 /* complicated encodings */898 case 5:899 case 13:900 if ((bRm & X86_MODRM_MOD_MASK) != 0)901 {902 if (!pVCpu->iem.s.uRexB)903 {904 u64EffAddr += pVCpu->cpum.GstCtx.rbp;905 SET_SS_DEF();906 }907 else908 u64EffAddr += pVCpu->cpum.GstCtx.r13;909 }910 else911 {912 uint32_t u32Disp;913 IEM_OPCODE_GET_NEXT_U32(&u32Disp);914 u64EffAddr += (int32_t)u32Disp;915 uInfo |= u32Disp;916 }917 break;918 IEM_NOT_REACHED_DEFAULT_CASE_RET();919 }920 break;921 }922 IEM_NOT_REACHED_DEFAULT_CASE_RET();923 }924 925 /* Get and add the 
displacement. */926 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)927 {928 case 0:929 break;930 case 1:931 {932 int8_t i8Disp;933 IEM_OPCODE_GET_NEXT_S8(&i8Disp);934 u64EffAddr += i8Disp;935 uInfo |= (uint32_t)(int32_t)i8Disp;936 break;937 }938 case 2:939 {940 uint32_t u32Disp;941 IEM_OPCODE_GET_NEXT_U32(&u32Disp);942 u64EffAddr += (int32_t)u32Disp;943 uInfo |= u32Disp;944 break;945 }946 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */947 }948 949 }950 951 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)952 *pGCPtrEff = u64EffAddr;953 else954 {955 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);956 *pGCPtrEff = u64EffAddr & UINT32_MAX;957 }958 }959 *puInfo = uInfo;960 961 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));962 return VINF_SUCCESS;963 }964 347 965 348 /** @} */ -
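The block above removes the status-code flavours iemOpHlpCalcRmEffAddr and iemOpHlpCalcRmEffAddrEx, leaving only the longjmp variant of ModR/M effective-address calculation. As a refresher on the 16-bit addressing rule those switch tables encode (base/index register pair plus displacement, truncated to 16 bits), here is a small standalone sketch with made-up register values; it is an illustration of the rule, not the IEM code, and it ignores segment handling and the mod=00/rm=110 disp16-only special case.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct DEMOREGS { uint16_t bx, bp, si, di; } DEMOREGS;

    static uint16_t demoCalc16BitEffAddr(DEMOREGS const *pRegs, uint8_t bRm, int16_t i16Disp)
    {
        uint16_t uEff;
        switch (bRm & 7) /* the R/M field selects the base/index combination */
        {
            case 0:  uEff = pRegs->bx + pRegs->si; break;
            case 1:  uEff = pRegs->bx + pRegs->di; break;
            case 2:  uEff = pRegs->bp + pRegs->si; break; /* BP-based forms default to SS */
            case 3:  uEff = pRegs->bp + pRegs->di; break;
            case 4:  uEff = pRegs->si;             break;
            case 5:  uEff = pRegs->di;             break;
            case 6:  uEff = pRegs->bp;             break; /* mod=00,rm=110 is disp16-only; not modelled here */
            default: uEff = pRegs->bx;             break;
        }
        return (uint16_t)(uEff + i16Disp); /* wraps at 64 KiB */
    }

    int main(void)
    {
        DEMOREGS Regs = { /*bx*/ 0x1000, /*bp*/ 0x8000, /*si*/ 0x0042, /*di*/ 0x0010 };
        /* e.g. mov ax, [bx+si+0x20]: rm=000, disp=0x20 */
        printf("effaddr=%#06x\n", (unsigned)demoCalc16BitEffAddr(&Regs, 0x00, 0x20)); /* 0x1062 */
        return 0;
    }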
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp
r108260 r108278 736 736 737 737 #endif /* !IEM_WITH_CODE_TLB */ 738 #ifndef IEM_WITH_SETJMP739 740 /**741 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.742 *743 * @returns Strict VBox status code.744 * @param pVCpu The cross context virtual CPU structure of the745 * calling thread.746 * @param pb Where to return the opcode byte.747 */748 VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT749 {750 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);751 if (rcStrict == VINF_SUCCESS)752 {753 uint8_t offOpcode = pVCpu->iem.s.offOpcode;754 *pb = pVCpu->iem.s.abOpcode[offOpcode];755 pVCpu->iem.s.offOpcode = offOpcode + 1;756 }757 else758 *pb = 0;759 return rcStrict;760 }761 762 #else /* IEM_WITH_SETJMP */763 738 764 739 /** … … 770 745 uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP 771 746 { 772 # 747 #ifdef IEM_WITH_CODE_TLB 773 748 uint8_t u8; 774 749 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8); 775 750 return u8; 776 # 751 #else 777 752 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1); 778 753 if (rcStrict == VINF_SUCCESS) 779 754 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++]; 780 755 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)); 781 # 756 #endif 782 757 } 783 758 784 #endif /* IEM_WITH_SETJMP */785 786 #ifndef IEM_WITH_SETJMP787 788 /**789 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.790 *791 * @returns Strict VBox status code.792 * @param pVCpu The cross context virtual CPU structure of the calling thread.793 * @param pu16 Where to return the opcode dword.794 */795 VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT796 {797 uint8_t u8;798 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);799 if (rcStrict == VINF_SUCCESS)800 *pu16 = (int8_t)u8;801 return rcStrict;802 }803 804 805 /**806 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.807 *808 * @returns Strict VBox status code.809 * @param pVCpu The cross context virtual CPU structure of the calling thread.810 * @param pu32 Where to return the opcode dword.811 */812 VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT813 {814 uint8_t u8;815 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);816 if (rcStrict == VINF_SUCCESS)817 *pu32 = (int8_t)u8;818 return rcStrict;819 }820 821 822 /**823 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.824 *825 * @returns Strict VBox status code.826 * @param pVCpu The cross context virtual CPU structure of the calling thread.827 * @param pu64 Where to return the opcode qword.828 */829 VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT830 {831 uint8_t u8;832 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);833 if (rcStrict == VINF_SUCCESS)834 *pu64 = (int8_t)u8;835 return rcStrict;836 }837 838 #endif /* !IEM_WITH_SETJMP */839 840 841 #ifndef IEM_WITH_SETJMP842 843 /**844 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.845 *846 * @returns Strict VBox status code.847 * @param pVCpu The cross context virtual CPU structure of the calling thread.848 * @param pu16 Where to return the opcode word.849 */850 VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT851 {852 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);853 if (rcStrict == VINF_SUCCESS)854 {855 uint8_t offOpcode = pVCpu->iem.s.offOpcode;856 # ifdef 
IEM_USE_UNALIGNED_DATA_ACCESS857 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];858 # else859 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);860 # endif861 pVCpu->iem.s.offOpcode = offOpcode + 2;862 }863 else864 *pu16 = 0;865 return rcStrict;866 }867 868 #else /* IEM_WITH_SETJMP */869 759 870 760 /** … … 876 766 uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP 877 767 { 878 # 768 #ifdef IEM_WITH_CODE_TLB 879 769 uint16_t u16; 880 770 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16); 881 771 return u16; 882 # 772 #else 883 773 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2); 884 774 if (rcStrict == VINF_SUCCESS) … … 886 776 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 887 777 pVCpu->iem.s.offOpcode += 2; 888 # 778 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 889 779 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 890 # 780 # else 891 781 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 892 # 782 # endif 893 783 } 894 784 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)); 895 # 785 #endif 896 786 } 897 787 898 #endif /* IEM_WITH_SETJMP */899 900 #ifndef IEM_WITH_SETJMP901 902 /**903 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.904 *905 * @returns Strict VBox status code.906 * @param pVCpu The cross context virtual CPU structure of the calling thread.907 * @param pu32 Where to return the opcode double word.908 */909 VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT910 {911 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);912 if (rcStrict == VINF_SUCCESS)913 {914 uint8_t offOpcode = pVCpu->iem.s.offOpcode;915 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);916 pVCpu->iem.s.offOpcode = offOpcode + 2;917 }918 else919 *pu32 = 0;920 return rcStrict;921 }922 923 924 /**925 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.926 *927 * @returns Strict VBox status code.928 * @param pVCpu The cross context virtual CPU structure of the calling thread.929 * @param pu64 Where to return the opcode quad word.930 */931 VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT932 {933 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);934 if (rcStrict == VINF_SUCCESS)935 {936 uint8_t offOpcode = pVCpu->iem.s.offOpcode;937 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);938 pVCpu->iem.s.offOpcode = offOpcode + 2;939 }940 else941 *pu64 = 0;942 return rcStrict;943 }944 945 #endif /* !IEM_WITH_SETJMP */946 947 #ifndef IEM_WITH_SETJMP948 949 /**950 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.951 *952 * @returns Strict VBox status code.953 * @param pVCpu The cross context virtual CPU structure of the calling thread.954 * @param pu32 Where to return the opcode dword.955 */956 VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT957 {958 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);959 if (rcStrict == VINF_SUCCESS)960 {961 uint8_t offOpcode = pVCpu->iem.s.offOpcode;962 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS963 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];964 # else965 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],966 pVCpu->iem.s.abOpcode[offOpcode + 1],967 pVCpu->iem.s.abOpcode[offOpcode + 2],968 pVCpu->iem.s.abOpcode[offOpcode + 3]);969 # endif970 
pVCpu->iem.s.offOpcode = offOpcode + 4;971 }972 else973 *pu32 = 0;974 return rcStrict;975 }976 977 #else /* IEM_WITH_SETJMP */978 788 979 789 /** … … 985 795 uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP 986 796 { 987 # 797 #ifdef IEM_WITH_CODE_TLB 988 798 uint32_t u32; 989 799 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32); 990 800 return u32; 991 # 801 #else 992 802 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4); 993 803 if (rcStrict == VINF_SUCCESS) … … 995 805 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 996 806 pVCpu->iem.s.offOpcode = offOpcode + 4; 997 # 807 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 998 808 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 999 # 809 # else 1000 810 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 1001 811 pVCpu->iem.s.abOpcode[offOpcode + 1], 1002 812 pVCpu->iem.s.abOpcode[offOpcode + 2], 1003 813 pVCpu->iem.s.abOpcode[offOpcode + 3]); 1004 # 814 # endif 1005 815 } 1006 816 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)); 1007 # 817 #endif 1008 818 } 1009 819 1010 #endif /* IEM_WITH_SETJMP */1011 1012 #ifndef IEM_WITH_SETJMP1013 1014 /**1015 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.1016 *1017 * @returns Strict VBox status code.1018 * @param pVCpu The cross context virtual CPU structure of the calling thread.1019 * @param pu64 Where to return the opcode dword.1020 */1021 VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT1022 {1023 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);1024 if (rcStrict == VINF_SUCCESS)1025 {1026 uint8_t offOpcode = pVCpu->iem.s.offOpcode;1027 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],1028 pVCpu->iem.s.abOpcode[offOpcode + 1],1029 pVCpu->iem.s.abOpcode[offOpcode + 2],1030 pVCpu->iem.s.abOpcode[offOpcode + 3]);1031 pVCpu->iem.s.offOpcode = offOpcode + 4;1032 }1033 else1034 *pu64 = 0;1035 return rcStrict;1036 }1037 1038 1039 /**1040 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.1041 *1042 * @returns Strict VBox status code.1043 * @param pVCpu The cross context virtual CPU structure of the calling thread.1044 * @param pu64 Where to return the opcode qword.1045 */1046 VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT1047 {1048 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);1049 if (rcStrict == VINF_SUCCESS)1050 {1051 uint8_t offOpcode = pVCpu->iem.s.offOpcode;1052 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],1053 pVCpu->iem.s.abOpcode[offOpcode + 1],1054 pVCpu->iem.s.abOpcode[offOpcode + 2],1055 pVCpu->iem.s.abOpcode[offOpcode + 3]);1056 pVCpu->iem.s.offOpcode = offOpcode + 4;1057 }1058 else1059 *pu64 = 0;1060 return rcStrict;1061 }1062 1063 #endif /* !IEM_WITH_SETJMP */1064 1065 #ifndef IEM_WITH_SETJMP1066 1067 /**1068 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.1069 *1070 * @returns Strict VBox status code.1071 * @param pVCpu The cross context virtual CPU structure of the calling thread.1072 * @param pu64 Where to return the opcode qword.1073 */1074 VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT1075 {1076 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);1077 if (rcStrict == VINF_SUCCESS)1078 {1079 uint8_t offOpcode = pVCpu->iem.s.offOpcode;1080 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS1081 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];1082 # else1083 *pu64 = 
RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],1084 pVCpu->iem.s.abOpcode[offOpcode + 1],1085 pVCpu->iem.s.abOpcode[offOpcode + 2],1086 pVCpu->iem.s.abOpcode[offOpcode + 3],1087 pVCpu->iem.s.abOpcode[offOpcode + 4],1088 pVCpu->iem.s.abOpcode[offOpcode + 5],1089 pVCpu->iem.s.abOpcode[offOpcode + 6],1090 pVCpu->iem.s.abOpcode[offOpcode + 7]);1091 # endif1092 pVCpu->iem.s.offOpcode = offOpcode + 8;1093 }1094 else1095 *pu64 = 0;1096 return rcStrict;1097 }1098 1099 #else /* IEM_WITH_SETJMP */1100 820 1101 821 /** … … 1107 827 uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP 1108 828 { 1109 # 829 #ifdef IEM_WITH_CODE_TLB 1110 830 uint64_t u64; 1111 831 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64); 1112 832 return u64; 1113 # 833 #else 1114 834 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8); 1115 835 if (rcStrict == VINF_SUCCESS) … … 1117 837 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 1118 838 pVCpu->iem.s.offOpcode = offOpcode + 8; 1119 # 839 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 1120 840 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 1121 # 841 # else 1122 842 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 1123 843 pVCpu->iem.s.abOpcode[offOpcode + 1], … … 1128 848 pVCpu->iem.s.abOpcode[offOpcode + 6], 1129 849 pVCpu->iem.s.abOpcode[offOpcode + 7]); 1130 # 850 # endif 1131 851 } 1132 852 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)); 1133 # 853 #endif 1134 854 } 1135 855 1136 #endif /* IEM_WITH_SETJMP */1137 -
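The surviving SlowJmp fetchers above assemble multi-byte opcode values from individual bytes (via RT_MAKE_U16/RT_MAKE_U32_FROM_U8/RT_MAKE_U64_FROM_U8) whenever IEM_USE_UNALIGNED_DATA_ACCESS is not defined. A tiny standalone sketch of that little-endian assembly, with an invented opcode buffer, assuming nothing beyond the C standard library:

    #include <stdint.h>
    #include <stdio.h>

    /* Build a little-endian 32-bit value from four consecutive bytes, avoiding
     * any unaligned load. */
    static uint32_t demoMakeU32FromBytes(uint8_t const *pb)
    {
        return (uint32_t)pb[0]
             | ((uint32_t)pb[1] << 8)
             | ((uint32_t)pb[2] << 16)
             | ((uint32_t)pb[3] << 24);
    }

    int main(void)
    {
        /* Pretend opcode bytes: mov eax, 0x11223344 (b8 44 33 22 11). */
        uint8_t const abOpcode[] = { 0xb8, 0x44, 0x33, 0x22, 0x11 };
        printf("imm32=%#010x\n", (unsigned)demoMakeU32FromBytes(&abOpcode[1])); /* 0x11223344 */
        return 0;
    }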
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllThrdTables-x86.h
r108260 r108278
104 104 #endif
105 105
106 #ifndef IEM_WITH_SETJMP
107 # error The setjmp approach must be enabled for the recompiler.
108 #endif
109
110 106
111 107 /*********************************************************************************************************************************
… …
133 129 */
134 130 #undef IEM_MC_CALC_RM_EFF_ADDR
135 #ifndef IEM_WITH_SETJMP
136 # define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
137 uint64_t uEffAddrInfo; \
138 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff), &uEffAddrInfo))
139 #else
140 # define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \ 131 #define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
141 132 uint64_t uEffAddrInfo; \
142 133 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &uEffAddrInfo))
143 #endif
144 134
145 135 /*
-
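After this hunk, IEM_MC_CALC_RM_EFF_ADDR always expands to the assignment form calling iemOpHlpCalcRmEffAddrJmpEx, with failure reported from inside the helper (by longjmp) rather than through an IEM_MC_RETURN_ON_FAILURE status check. The compilable toy below shows only the general shape of such an assignment-style macro; the names, the abort() stand-in for the longjmp path, and the address arithmetic are all invented for illustration.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Helper that either produces an address or bails out abruptly, so callers
     * need no return-code plumbing. */
    static uint64_t demoCalcEffAddr(uint8_t bRm, uint16_t uDisp)
    {
        if (bRm > 7)     /* "impossible" encoding */
            abort();     /* stands in for a longjmp back to the dispatcher */
        return (uint64_t)bRm * 0x1000 + uDisp;
    }

    /* Assignment-style macro: usable as a plain statement in the decoder body. */
    #define DEMO_CALC_EFF_ADDR(a_GCPtrEff, a_bRm, a_uDisp) \
        ((a_GCPtrEff) = demoCalcEffAddr((a_bRm), (a_uDisp)))

    int main(void)
    {
        uint64_t GCPtrEff;
        DEMO_CALC_EFF_ADDR(GCPtrEff, 3, 0x20);
        printf("GCPtrEff=%#llx\n", (unsigned long long)GCPtrEff); /* 0x3020 */
        return 0;
    }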
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllXcpt-x86.cpp
r108260 r108278 2601 2601 } 2602 2602 2603 #ifdef IEM_WITH_SETJMP2604 2603 /** 2605 2604 * See iemRaiseXcptOrInt. Will not return. … … 2616 2615 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)); 2617 2616 } 2618 #endif2619 2617 2620 2618 … … 2635 2633 2636 2634 2637 #ifdef IEM_WITH_SETJMP2638 2635 /** \#DE - 00. */ 2639 2636 DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP … … 2641 2638 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 2642 2639 } 2643 #endif2644 2640 2645 2641 … … 2668 2664 2669 2665 2670 #ifdef IEM_WITH_SETJMP2671 2666 /** \#UD - 06. */ 2672 2667 DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP … … 2674 2669 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 2675 2670 } 2676 #endif2677 2671 2678 2672 … … 2684 2678 2685 2679 2686 #ifdef IEM_WITH_SETJMP2687 2680 /** \#NM - 07. */ 2688 2681 DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP … … 2690 2683 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 2691 2684 } 2692 #endif2693 2685 2694 2686 … … 2775 2767 } 2776 2768 2777 #ifdef IEM_WITH_SETJMP 2769 2778 2770 /** \#GP(0) - 0d. */ 2779 2771 DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP … … 2782 2774 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 2783 2775 } 2784 #endif2785 2776 2786 2777 … … 2813 2804 } 2814 2805 2815 #ifdef IEM_WITH_SETJMP 2806 2816 2807 /** \#GP(sel) - 0d, longjmp. */ 2817 2808 DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP … … 2823 2814 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 2824 2815 } 2825 #endif 2816 2826 2817 2827 2818 /** \#GP(sel) - 0d. */ … … 2834 2825 } 2835 2826 2836 #ifdef IEM_WITH_SETJMP 2827 2837 2828 /** \#GP(sel) - 0d, longjmp. */ 2838 2829 DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP … … 2843 2834 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 2844 2835 } 2845 #endif2846 2836 2847 2837 … … 2855 2845 } 2856 2846 2857 #ifdef IEM_WITH_SETJMP 2847 2858 2848 /** \#GP(sel) - 0d, longjmp. */ 2859 2849 DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP … … 2862 2852 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 2863 2853 } 2864 #endif2865 2854 2866 2855 … … 2930 2919 } 2931 2920 2932 #ifdef IEM_WITH_SETJMP 2921 2933 2922 /** \#PF(n) - 0e, longjmp. */ 2934 2923 DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, … … 2937 2926 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc))); 2938 2927 } 2939 #endif2940 2928 2941 2929 … … 2951 2939 } 2952 2940 2953 #ifdef IEM_WITH_SETJMP 2941 2954 2942 /** \#MF(0) - 10, longjmp. */ 2955 2943 DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP … … 2957 2945 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu))); 2958 2946 } 2959 #endif2960 2947 2961 2948 … … 2966 2953 } 2967 2954 2968 #ifdef IEM_WITH_SETJMP 2955 2969 2956 /** \#AC(0) - 11, longjmp. 
*/ 2970 2957 DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP … … 2972 2959 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu))); 2973 2960 } 2974 #endif2975 2961 2976 2962 … … 2982 2968 2983 2969 2984 #ifdef IEM_WITH_SETJMP2985 2970 /** \#XF(0)/\#XM(0) - 19s, longjmp. */ 2986 2971 DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP … … 2988 2973 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu))); 2989 2974 } 2990 #endif2991 2975 2992 2976 -
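The exception-raising helpers kept above (iemRaiseGeneralProtectionFault0Jmp, iemRaisePageFaultJmp and friends) are DECL_NO_RETURN and report the fault by longjmp'ing back to the execution loop via IEM_DO_LONGJMP / iemRaiseXcptOrIntJmp, which this changeset makes the only mechanism. For orientation, here is a minimal, self-contained setjmp/longjmp sketch of that control flow; the function names and the 0x0d "status" are invented and merely echo the idea, not the IEM macros.

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf g_JmpBuf;

    /* Never returns normally; mirrors the DECL_NO_RETURN(...)Jmp raise helpers. */
    static void demoRaiseGeneralProtectionFault(void)
    {
        longjmp(g_JmpBuf, 0x0d /* pretend #GP status code */);
    }

    static void demoExecuteOneInstruction(int fFaulty)
    {
        if (fFaulty)
            demoRaiseGeneralProtectionFault(); /* unwinds straight to the catch below */
        puts("instruction retired");
    }

    int main(void)
    {
        switch (setjmp(g_JmpBuf))              /* roughly "IEM_TRY_SETJMP" */
        {
            case 0:
                demoExecuteOneInstruction(0);
                demoExecuteOneInstruction(1);
                puts("not reached");
                break;
            case 0x0d:                         /* roughly "IEM_CATCH_LONGJMP_BEGIN" */
                puts("caught raised #GP, rolling back and fiddling the status");
                break;
            default:
                puts("caught some other raised exception");
                break;
        }
        return 0;
    }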
trunk/src/VBox/VMM/VMMAll/target-x86/IEMInlineDecode-x86.h
r108260 r108278 36 36 37 37 #ifndef IEM_WITH_OPAQUE_DECODER_STATE 38 39 # ifndef IEM_WITH_SETJMP40 41 /**42 * Fetches the first opcode byte.43 *44 * @returns Strict VBox status code.45 * @param pVCpu The cross context virtual CPU structure of the46 * calling thread.47 * @param pu8 Where to return the opcode byte.48 */49 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT50 {51 /*52 * Check for hardware instruction breakpoints.53 * Note! Guest breakpoints are only checked after POP SS or MOV SS on AMD CPUs.54 */55 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))56 { /* likely */ }57 else58 {59 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,60 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,61 !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)62 || IEM_IS_GUEST_CPU_AMD(pVCpu));63 if (RT_LIKELY(rcStrict == VINF_SUCCESS))64 { /* likely */ }65 else66 {67 *pu8 = 0xff; /* shut up gcc. sigh */68 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)69 return iemRaiseDebugException(pVCpu);70 return rcStrict;71 }72 }73 74 /*75 * Fetch the first opcode byte.76 */77 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;78 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))79 {80 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;81 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];82 return VINF_SUCCESS;83 }84 return iemOpcodeGetNextU8Slow(pVCpu, pu8);85 }86 87 # else /* IEM_WITH_SETJMP */88 38 89 39 /** … … 120 70 * Fetch the first opcode byte. 121 71 */ 122 # 72 # ifdef IEM_WITH_CODE_TLB 123 73 uint8_t bRet; 124 74 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte; … … 132 82 else 133 83 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu); 134 # 84 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF 135 85 Assert(pVCpu->iem.s.offOpcode == 0); 136 86 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet; 137 # 87 # endif 138 88 return bRet; 139 89 140 # 90 # else /* !IEM_WITH_CODE_TLB */ 141 91 uintptr_t offOpcode = pVCpu->iem.s.offOpcode; 142 92 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode)) … … 146 96 } 147 97 return iemOpcodeGetNextU8SlowJmp(pVCpu); 148 # endif 149 } 150 151 # endif /* IEM_WITH_SETJMP */ 98 # endif 99 } 152 100 153 101 /** … … 157 105 * @remark Implicitly references pVCpu. 158 106 */ 159 # ifndef IEM_WITH_SETJMP 160 # define IEM_OPCODE_GET_FIRST_U8(a_pu8) \ 161 do \ 162 { \ 163 VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \ 164 if (rcStrict2 == VINF_SUCCESS) \ 165 { /* likely */ } \ 166 else \ 167 return rcStrict2; \ 168 } while (0) 169 # else 170 # define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu)) 171 # endif /* IEM_WITH_SETJMP */ 172 173 174 # ifndef IEM_WITH_SETJMP 175 176 /** 177 * Fetches the next opcode byte. 178 * 179 * @returns Strict VBox status code. 180 * @param pVCpu The cross context virtual CPU structure of the 181 * calling thread. 182 * @param pu8 Where to return the opcode byte. 
183 */ 184 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT 185 { 186 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 187 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode)) 188 { 189 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1; 190 *pu8 = pVCpu->iem.s.abOpcode[offOpcode]; 191 return VINF_SUCCESS; 192 } 193 return iemOpcodeGetNextU8Slow(pVCpu, pu8); 194 } 195 196 # else /* IEM_WITH_SETJMP */ 107 # define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu)) 108 197 109 198 110 /** … … 204 116 DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP 205 117 { 206 # 118 # ifdef IEM_WITH_CODE_TLB 207 119 uint8_t bRet; 208 120 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte; … … 216 128 else 217 129 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu); 218 # 130 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF 219 131 Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode)); 220 132 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet; 221 # 133 # endif 222 134 return bRet; 223 135 224 # 136 # else /* !IEM_WITH_CODE_TLB */ 225 137 uintptr_t offOpcode = pVCpu->iem.s.offOpcode; 226 138 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode)) … … 230 142 } 231 143 return iemOpcodeGetNextU8SlowJmp(pVCpu); 232 # endif 233 } 234 235 # endif /* IEM_WITH_SETJMP */ 144 # endif 145 } 236 146 237 147 /** … … 241 151 * @remark Implicitly references pVCpu. 242 152 */ 243 # ifndef IEM_WITH_SETJMP 244 # define IEM_OPCODE_GET_NEXT_U8(a_pu8) \ 245 do \ 246 { \ 247 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \ 248 if (rcStrict2 == VINF_SUCCESS) \ 249 { /* likely */ } \ 250 else \ 251 return rcStrict2; \ 252 } while (0) 253 # else 254 # define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu)) 255 # endif /* IEM_WITH_SETJMP */ 256 257 258 # ifndef IEM_WITH_SETJMP 259 /** 260 * Fetches the next signed byte from the opcode stream. 261 * 262 * @returns Strict VBox status code. 263 * @param pVCpu The cross context virtual CPU structure of the calling thread. 264 * @param pi8 Where to return the signed byte. 265 */ 266 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT 267 { 268 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8); 269 } 270 # endif /* !IEM_WITH_SETJMP */ 271 153 # define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu)) 272 154 273 155 /** … … 278 160 * @remark Implicitly references pVCpu. 279 161 */ 280 # ifndef IEM_WITH_SETJMP 281 # define IEM_OPCODE_GET_NEXT_S8(a_pi8) \ 282 do \ 283 { \ 284 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \ 285 if (rcStrict2 != VINF_SUCCESS) \ 286 return rcStrict2; \ 287 } while (0) 288 # else /* IEM_WITH_SETJMP */ 289 # define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 290 291 # endif /* IEM_WITH_SETJMP */ 292 293 294 # ifndef IEM_WITH_SETJMP 295 /** 296 * Fetches the next signed byte from the opcode stream, extending it to 297 * unsigned 16-bit. 298 * 299 * @returns Strict VBox status code. 300 * @param pVCpu The cross context virtual CPU structure of the calling thread. 301 * @param pu16 Where to return the unsigned word. 
302 */ 303 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT 304 { 305 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 306 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode)) 307 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16); 308 309 *pu16 = (uint16_t)(int16_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode]; 310 pVCpu->iem.s.offOpcode = offOpcode + 1; 311 return VINF_SUCCESS; 312 } 313 # endif /* !IEM_WITH_SETJMP */ 162 # define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 314 163 315 164 /** … … 320 169 * @remark Implicitly references pVCpu. 321 170 */ 322 # ifndef IEM_WITH_SETJMP 323 # define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \ 324 do \ 325 { \ 326 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \ 327 if (rcStrict2 != VINF_SUCCESS) \ 328 return rcStrict2; \ 329 } while (0) 330 # else 331 # define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 332 # endif 333 334 # ifndef IEM_WITH_SETJMP 335 /** 336 * Fetches the next signed byte from the opcode stream, extending it to 337 * unsigned 32-bit. 338 * 339 * @returns Strict VBox status code. 340 * @param pVCpu The cross context virtual CPU structure of the calling thread. 341 * @param pu32 Where to return the unsigned dword. 342 */ 343 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT 344 { 345 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 346 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode)) 347 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32); 348 349 *pu32 = (uint32_t)(int32_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode]; 350 pVCpu->iem.s.offOpcode = offOpcode + 1; 351 return VINF_SUCCESS; 352 } 353 # endif /* !IEM_WITH_SETJMP */ 171 # define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 354 172 355 173 /** … … 360 178 * @remark Implicitly references pVCpu. 361 179 */ 362 # ifndef IEM_WITH_SETJMP 363 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \ 364 do \ 365 { \ 366 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \ 367 if (rcStrict2 != VINF_SUCCESS) \ 368 return rcStrict2; \ 369 } while (0) 370 # else 371 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 372 # endif 373 374 375 # ifndef IEM_WITH_SETJMP 376 /** 377 * Fetches the next signed byte from the opcode stream, extending it to 378 * unsigned 64-bit. 379 * 380 * @returns Strict VBox status code. 381 * @param pVCpu The cross context virtual CPU structure of the calling thread. 382 * @param pu64 Where to return the unsigned qword. 383 */ 384 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT 385 { 386 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 387 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode)) 388 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64); 389 390 *pu64 = (uint64_t)(int64_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode]; 391 pVCpu->iem.s.offOpcode = offOpcode + 1; 392 return VINF_SUCCESS; 393 } 394 # endif /* !IEM_WITH_SETJMP */ 180 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 395 181 396 182 /** … … 401 187 * @remark Implicitly references pVCpu. 
402 188 */ 403 # ifndef IEM_WITH_SETJMP 404 # define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \ 405 do \ 406 { \ 407 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \ 408 if (rcStrict2 != VINF_SUCCESS) \ 409 return rcStrict2; \ 410 } while (0) 411 # else 412 # define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 413 # endif 414 415 416 # ifndef IEM_WITH_SETJMP 417 418 /** 419 * Fetches the next opcode word. 420 * 421 * @returns Strict VBox status code. 422 * @param pVCpu The cross context virtual CPU structure of the calling thread. 423 * @param pu16 Where to return the opcode word. 424 */ 425 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT 426 { 427 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 428 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode)) 429 { 430 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2; 431 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 432 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 433 # else 434 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 435 # endif 436 return VINF_SUCCESS; 437 } 438 return iemOpcodeGetNextU16Slow(pVCpu, pu16); 439 } 440 441 # else /* IEM_WITH_SETJMP */ 189 # define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 190 442 191 443 192 /** … … 449 198 DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP 450 199 { 451 # 200 # ifdef IEM_WITH_CODE_TLB 452 201 uint16_t u16Ret; 453 202 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte; … … 457 206 { 458 207 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2; 459 # 208 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 460 209 u16Ret = *(uint16_t const *)&pbBuf[offBuf]; 461 # 210 # else 462 211 u16Ret = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]); 463 # 212 # endif 464 213 } 465 214 else 466 215 u16Ret = iemOpcodeGetNextU16SlowJmp(pVCpu); 467 216 468 # 217 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF 469 218 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 470 219 Assert(offOpcode + 1 < sizeof(pVCpu->iem.s.abOpcode)); 471 # 220 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 472 221 *(uint16_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u16Ret; 473 # 222 # else 474 223 pVCpu->iem.s.abOpcode[offOpcode] = RT_LO_U8(u16Ret); 475 224 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_HI_U8(u16Ret); 476 # 225 # endif 477 226 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)2; 478 # 227 # endif 479 228 480 229 return u16Ret; 481 230 482 # 231 # else /* !IEM_WITH_CODE_TLB */ 483 232 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 484 233 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode)) 485 234 { 486 235 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2; 487 # 236 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 488 237 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 489 # 238 # else 490 239 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 491 # 240 # endif 492 241 } 493 242 return iemOpcodeGetNextU16SlowJmp(pVCpu); 494 # endif /* !IEM_WITH_CODE_TLB */ 495 } 496 497 # endif /* IEM_WITH_SETJMP */ 243 # endif /* !IEM_WITH_CODE_TLB */ 244 } 498 245 499 246 /** … … 503 250 * @remark Implicitly references pVCpu. 
504 251 */ 505 # ifndef IEM_WITH_SETJMP 506 # define IEM_OPCODE_GET_NEXT_U16(a_pu16) \ 507 do \ 508 { \ 509 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \ 510 if (rcStrict2 != VINF_SUCCESS) \ 511 return rcStrict2; \ 512 } while (0) 513 # else 514 # define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu)) 515 # endif 516 517 # ifndef IEM_WITH_SETJMP 518 /** 519 * Fetches the next opcode word, zero extending it to a double word. 520 * 521 * @returns Strict VBox status code. 522 * @param pVCpu The cross context virtual CPU structure of the calling thread. 523 * @param pu32 Where to return the opcode double word. 524 */ 525 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT 526 { 527 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 528 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode)) 529 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32); 530 531 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 532 pVCpu->iem.s.offOpcode = offOpcode + 2; 533 return VINF_SUCCESS; 534 } 535 # endif /* !IEM_WITH_SETJMP */ 252 # define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu)) 536 253 537 254 /** … … 542 259 * @remark Implicitly references pVCpu. 543 260 */ 544 # ifndef IEM_WITH_SETJMP 545 # define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \ 546 do \ 547 { \ 548 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \ 549 if (rcStrict2 != VINF_SUCCESS) \ 550 return rcStrict2; \ 551 } while (0) 552 # else 553 # define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu)) 554 # endif 555 556 # ifndef IEM_WITH_SETJMP 557 /** 558 * Fetches the next opcode word, zero extending it to a quad word. 559 * 560 * @returns Strict VBox status code. 561 * @param pVCpu The cross context virtual CPU structure of the calling thread. 562 * @param pu64 Where to return the opcode quad word. 563 */ 564 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT 565 { 566 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 567 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode)) 568 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64); 569 570 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 571 pVCpu->iem.s.offOpcode = offOpcode + 2; 572 return VINF_SUCCESS; 573 } 574 # endif /* !IEM_WITH_SETJMP */ 261 # define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu)) 575 262 576 263 /** … … 581 268 * @remark Implicitly references pVCpu. 582 269 */ 583 # ifndef IEM_WITH_SETJMP 584 # define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \ 585 do \ 586 { \ 587 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \ 588 if (rcStrict2 != VINF_SUCCESS) \ 589 return rcStrict2; \ 590 } while (0) 591 # else 592 # define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu)) 593 # endif 594 595 596 # ifndef IEM_WITH_SETJMP 597 /** 598 * Fetches the next signed word from the opcode stream. 599 * 600 * @returns Strict VBox status code. 601 * @param pVCpu The cross context virtual CPU structure of the calling thread. 602 * @param pi16 Where to return the signed word. 
603 */ 604 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT 605 { 606 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16); 607 } 608 # endif /* !IEM_WITH_SETJMP */ 609 270 # define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu)) 610 271 611 272 /** … … 616 277 * @remark Implicitly references pVCpu. 617 278 */ 618 # ifndef IEM_WITH_SETJMP 619 # define IEM_OPCODE_GET_NEXT_S16(a_pi16) \ 620 do \ 621 { \ 622 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \ 623 if (rcStrict2 != VINF_SUCCESS) \ 624 return rcStrict2; \ 625 } while (0) 626 # else 627 # define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu)) 628 # endif 629 630 # ifndef IEM_WITH_SETJMP 631 632 /** 633 * Fetches the next opcode dword. 634 * 635 * @returns Strict VBox status code. 636 * @param pVCpu The cross context virtual CPU structure of the calling thread. 637 * @param pu32 Where to return the opcode double word. 638 */ 639 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT 640 { 641 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 642 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode)) 643 { 644 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4; 645 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 646 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 647 # else 648 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 649 pVCpu->iem.s.abOpcode[offOpcode + 1], 650 pVCpu->iem.s.abOpcode[offOpcode + 2], 651 pVCpu->iem.s.abOpcode[offOpcode + 3]); 652 # endif 653 return VINF_SUCCESS; 654 } 655 return iemOpcodeGetNextU32Slow(pVCpu, pu32); 656 } 657 658 # else /* IEM_WITH_SETJMP */ 279 # define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu)) 280 659 281 660 282 /** … … 666 288 DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP 667 289 { 668 # 290 # ifdef IEM_WITH_CODE_TLB 669 291 uint32_t u32Ret; 670 292 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte; … … 674 296 { 675 297 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4; 676 # 298 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 677 299 u32Ret = *(uint32_t const *)&pbBuf[offBuf]; 678 # 300 # else 679 301 u32Ret = RT_MAKE_U32_FROM_U8(pbBuf[offBuf], 680 302 pbBuf[offBuf + 1], 681 303 pbBuf[offBuf + 2], 682 304 pbBuf[offBuf + 3]); 683 # 305 # endif 684 306 } 685 307 else 686 308 u32Ret = iemOpcodeGetNextU32SlowJmp(pVCpu); 687 309 688 # 310 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF 689 311 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 690 312 Assert(offOpcode + 3 < sizeof(pVCpu->iem.s.abOpcode)); 691 # 313 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 692 314 *(uint32_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u32Ret; 693 # 315 # else 694 316 pVCpu->iem.s.abOpcode[offOpcode] = RT_BYTE1(u32Ret); 695 317 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u32Ret); 696 318 pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u32Ret); 697 319 pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u32Ret); 698 # 320 # endif 699 321 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)4; 700 # 322 # endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */ 701 323 702 324 return u32Ret; 703 325 704 # 326 # else /* !IEM_WITH_CODE_TLB */ 705 327 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 706 328 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode)) 707 329 { 708 330 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4; 709 # 331 # ifdef 
IEM_USE_UNALIGNED_DATA_ACCESS 710 332 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 711 # 333 # else 712 334 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 713 335 pVCpu->iem.s.abOpcode[offOpcode + 1], 714 336 pVCpu->iem.s.abOpcode[offOpcode + 2], 715 337 pVCpu->iem.s.abOpcode[offOpcode + 3]); 716 # 338 # endif 717 339 } 718 340 return iemOpcodeGetNextU32SlowJmp(pVCpu); 719 # endif 720 } 721 722 # endif /* IEM_WITH_SETJMP */ 341 # endif 342 } 723 343 724 344 /** … … 728 348 * @remark Implicitly references pVCpu. 729 349 */ 730 # ifndef IEM_WITH_SETJMP 731 # define IEM_OPCODE_GET_NEXT_U32(a_pu32) \ 732 do \ 733 { \ 734 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \ 735 if (rcStrict2 != VINF_SUCCESS) \ 736 return rcStrict2; \ 737 } while (0) 738 # else 739 # define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu)) 740 # endif 741 742 # ifndef IEM_WITH_SETJMP 743 /** 744 * Fetches the next opcode dword, zero extending it to a quad word. 745 * 746 * @returns Strict VBox status code. 747 * @param pVCpu The cross context virtual CPU structure of the calling thread. 748 * @param pu64 Where to return the opcode quad word. 749 */ 750 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT 751 { 752 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 753 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode)) 754 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64); 755 756 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 757 pVCpu->iem.s.abOpcode[offOpcode + 1], 758 pVCpu->iem.s.abOpcode[offOpcode + 2], 759 pVCpu->iem.s.abOpcode[offOpcode + 3]); 760 pVCpu->iem.s.offOpcode = offOpcode + 4; 761 return VINF_SUCCESS; 762 } 763 # endif /* !IEM_WITH_SETJMP */ 350 # define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu)) 764 351 765 352 /** … … 770 357 * @remark Implicitly references pVCpu. 771 358 */ 772 # ifndef IEM_WITH_SETJMP 773 # define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \ 774 do \ 775 { \ 776 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \ 777 if (rcStrict2 != VINF_SUCCESS) \ 778 return rcStrict2; \ 779 } while (0) 780 # else 781 # define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu)) 782 # endif 783 784 785 # ifndef IEM_WITH_SETJMP 786 /** 787 * Fetches the next signed double word from the opcode stream. 788 * 789 * @returns Strict VBox status code. 790 * @param pVCpu The cross context virtual CPU structure of the calling thread. 791 * @param pi32 Where to return the signed double word. 792 */ 793 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT 794 { 795 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32); 796 } 797 # endif 359 # define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu)) 798 360 799 361 /** … … 804 366 * @remark Implicitly references pVCpu. 805 367 */ 806 # ifndef IEM_WITH_SETJMP 807 # define IEM_OPCODE_GET_NEXT_S32(a_pi32) \ 808 do \ 809 { \ 810 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \ 811 if (rcStrict2 != VINF_SUCCESS) \ 812 return rcStrict2; \ 813 } while (0) 814 # else 815 # define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu)) 816 # endif 817 818 # ifndef IEM_WITH_SETJMP 819 /** 820 * Fetches the next opcode dword, sign extending it into a quad word. 821 * 822 * @returns Strict VBox status code. 
823 * @param pVCpu The cross context virtual CPU structure of the calling thread. 824 * @param pu64 Where to return the opcode quad word. 825 */ 826 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT 827 { 828 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 829 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode)) 830 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64); 831 832 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 833 pVCpu->iem.s.abOpcode[offOpcode + 1], 834 pVCpu->iem.s.abOpcode[offOpcode + 2], 835 pVCpu->iem.s.abOpcode[offOpcode + 3]); 836 *pu64 = (uint64_t)(int64_t)i32; 837 pVCpu->iem.s.offOpcode = offOpcode + 4; 838 return VINF_SUCCESS; 839 } 840 # endif /* !IEM_WITH_SETJMP */ 368 # define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu)) 841 369 842 370 /** … … 847 375 * @remark Implicitly references pVCpu. 848 376 */ 849 # ifndef IEM_WITH_SETJMP 850 # define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \ 851 do \ 852 { \ 853 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \ 854 if (rcStrict2 != VINF_SUCCESS) \ 855 return rcStrict2; \ 856 } while (0) 857 # else 858 # define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu)) 859 # endif 860 861 # ifndef IEM_WITH_SETJMP 862 863 /** 864 * Fetches the next opcode qword. 865 * 866 * @returns Strict VBox status code. 867 * @param pVCpu The cross context virtual CPU structure of the calling thread. 868 * @param pu64 Where to return the opcode qword. 869 */ 870 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT 871 { 872 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 873 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode)) 874 { 875 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 876 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 877 # else 878 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 879 pVCpu->iem.s.abOpcode[offOpcode + 1], 880 pVCpu->iem.s.abOpcode[offOpcode + 2], 881 pVCpu->iem.s.abOpcode[offOpcode + 3], 882 pVCpu->iem.s.abOpcode[offOpcode + 4], 883 pVCpu->iem.s.abOpcode[offOpcode + 5], 884 pVCpu->iem.s.abOpcode[offOpcode + 6], 885 pVCpu->iem.s.abOpcode[offOpcode + 7]); 886 # endif 887 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8; 888 return VINF_SUCCESS; 889 } 890 return iemOpcodeGetNextU64Slow(pVCpu, pu64); 891 } 892 893 # else /* IEM_WITH_SETJMP */ 377 # define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu)) 378 894 379 895 380 /** … … 901 386 DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP 902 387 { 903 # 388 # ifdef IEM_WITH_CODE_TLB 904 389 uint64_t u64Ret; 905 390 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte; … … 909 394 { 910 395 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8; 911 # 396 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 912 397 u64Ret = *(uint64_t const *)&pbBuf[offBuf]; 913 # 398 # else 914 399 u64Ret = RT_MAKE_U64_FROM_U8(pbBuf[offBuf], 915 400 pbBuf[offBuf + 1], … … 920 405 pbBuf[offBuf + 6], 921 406 pbBuf[offBuf + 7]); 922 # 407 # endif 923 408 } 924 409 else 925 410 u64Ret = iemOpcodeGetNextU64SlowJmp(pVCpu); 926 411 927 # 412 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF 928 413 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 929 414 Assert(offOpcode + 7 < sizeof(pVCpu->iem.s.abOpcode)); 930 # 415 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 931 416 
*(uint64_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u64Ret; 932 # 417 # else 933 418 pVCpu->iem.s.abOpcode[offOpcode] = RT_BYTE1(u64Ret); 934 419 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u64Ret); … … 939 424 pVCpu->iem.s.abOpcode[offOpcode + 6] = RT_BYTE7(u64Ret); 940 425 pVCpu->iem.s.abOpcode[offOpcode + 7] = RT_BYTE8(u64Ret); 941 # 426 # endif 942 427 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)8; 943 # 428 # endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */ 944 429 945 430 return u64Ret; 946 431 947 # 432 # else /* !IEM_WITH_CODE_TLB */ 948 433 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 949 434 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode)) 950 435 { 951 436 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8; 952 # 437 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS 953 438 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode]; 954 # 439 # else 955 440 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 956 441 pVCpu->iem.s.abOpcode[offOpcode + 1], … … 961 446 pVCpu->iem.s.abOpcode[offOpcode + 6], 962 447 pVCpu->iem.s.abOpcode[offOpcode + 7]); 963 # 448 # endif 964 449 } 965 450 return iemOpcodeGetNextU64SlowJmp(pVCpu); 966 # endif /* !IEM_WITH_CODE_TLB */ 967 } 968 969 # endif /* IEM_WITH_SETJMP */ 451 # endif /* !IEM_WITH_CODE_TLB */ 452 } 970 453 971 454 /** … … 975 458 * @remark Implicitly references pVCpu. 976 459 */ 977 # ifndef IEM_WITH_SETJMP 978 # define IEM_OPCODE_GET_NEXT_U64(a_pu64) \ 979 do \ 980 { \ 981 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \ 982 if (rcStrict2 != VINF_SUCCESS) \ 983 return rcStrict2; \ 984 } while (0) 985 # else 986 # define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) ) 987 # endif 460 # define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) ) 988 461 989 462 /** … … 997 470 * used instead. At least for now... 998 471 */ 999 # ifndef IEM_WITH_SETJMP 1000 # define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \ 1001 RTGCPTR GCPtrEff; \ 1002 VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff); \ 1003 if (rcStrict != VINF_SUCCESS) \ 1004 return rcStrict; \ 1005 } while (0) 1006 # else 1007 # define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \ 472 # define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \ 1008 473 (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0); \ 1009 474 } while (0) 1010 # endif 1011 1012 #endif /* !IEM_WITH_OPAQUE_DECODER_STATE */ 1013 1014 1015 #ifndef IEM_WITH_OPAQUE_DECODER_STATE 475 476 477 1016 478 1017 479 /** -
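What survives in the fetch helpers above is always the same shape: read the next 2, 4 or 8 little-endian bytes from the prefetched opcode buffer (or the code-TLB buffer) on the likely path, fall back to the ...SlowJmp routine otherwise, and report failure by longjmp/throw instead of returning a VBOXSTRICTRC. A minimal, self-contained sketch of that shape, with invented names, plain setjmp/longjmp standing in for IEM_DO_LONGJMP, and no TLB:

#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical, heavily simplified decoder state: just a prefetched buffer. */
struct DecoderState
{
    uint8_t  abOpcode[16];
    uint32_t cbOpcode;      /* number of valid bytes in abOpcode */
    uint32_t offOpcode;     /* current read offset               */
    jmp_buf  JmpBuf;        /* error exit                        */
};

/* Out-of-line slow path: a real decoder would fetch more bytes; we just bail. */
static uint32_t getNextU32Slow(DecoderState *pState)
{
    longjmp(pState->JmpBuf, -1 /* stand-in for a VERR_* style status */);
}

/* Inline fast path: four little-endian bytes straight out of the buffer. */
static inline uint32_t getNextU32(DecoderState *pState)
{
    uint32_t const off = pState->offOpcode;
    if (off + 4 <= pState->cbOpcode)                      /* the likely case */
    {
        pState->offOpcode = off + 4;
        return (uint32_t)pState->abOpcode[off]
             | ((uint32_t)pState->abOpcode[off + 1] <<  8)
             | ((uint32_t)pState->abOpcode[off + 2] << 16)
             | ((uint32_t)pState->abOpcode[off + 3] << 24);
    }
    return getNextU32Slow(pState);                        /* refill or longjmp */
}

int main()
{
    DecoderState State = {};
    static uint8_t const s_abBytes[] = { 0x78, 0x56, 0x34, 0x12, 0x90 };
    memcpy(State.abOpcode, s_abBytes, sizeof(s_abBytes));
    State.cbOpcode = sizeof(s_abBytes);

    int rc;
    if ((rc = setjmp(State.JmpBuf)) == 0)
    {
        printf("dword #1: %#x\n", (unsigned)getNextU32(&State)); /* 0x12345678 */
        printf("dword #2: %#x\n", (unsigned)getNextU32(&State)); /* 1 byte left: longjmp */
    }
    else
        printf("decode failed, rc=%d\n", rc);
    return 0;
}

The attraction of the split is that the fast path stays small enough to inline into every decoder call site, while the error handling costs nothing until it is actually taken.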
trunk/src/VBox/VMM/VMMAll/target-x86/IEMInlineMem-x86.h
r108262 r108278
137 137
138 138
139     #ifdef IEM_WITH_SETJMP
140 139
141 140 /** @todo slim this down */
… …
278 277 }
279 278
280     #endif /* IEM_WITH_SETJMP */
281 279
282 280 /**
-
trunk/src/VBox/VMM/VMMAll/target-x86/IEMInternal-x86.h
r108244 r108278 2846 2846 VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags, 2847 2847 uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT; 2848 #ifdef IEM_WITH_SETJMP2849 2848 DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, 2850 2849 uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP; 2851 #endif2852 2850 VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT; 2853 #ifdef IEM_WITH_SETJMP2854 2851 DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 2855 #endif2856 2852 VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT; 2857 2853 VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT; 2858 2854 VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT; 2859 #ifdef IEM_WITH_SETJMP2860 2855 DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 2861 #endif2862 2856 VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT; 2863 #ifdef IEM_WITH_SETJMP2864 2857 DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 2865 #endif2866 2858 VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT; 2867 2859 VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT; … … 2875 2867 VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT; 2876 2868 VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT; 2877 #ifdef IEM_WITH_SETJMP2878 2869 DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 2879 #endif2880 2870 VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT; 2881 2871 VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT; 2882 2872 VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT; 2883 #ifdef IEM_WITH_SETJMP2884 2873 DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP; 2885 #endif2886 2874 VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT; 2887 #ifdef IEM_WITH_SETJMP2888 2875 DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP; 2889 #endif2890 2876 VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT; 2891 #ifdef IEM_WITH_SETJMP2892 2877 DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP; 2893 #endif2894 2878 VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT; 2895 #ifdef IEM_WITH_SETJMP2896 2879 DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP; 2897 #endif2898 2880 VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT; 2899 #ifdef IEM_WITH_SETJMP2900 2881 DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 2901 #endif2902 2882 VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT; 2903 #ifdef IEM_WITH_SETJMP2904 2883 DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 2905 #endif2906 2884 VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) 
RT_NOEXCEPT; 2907 #ifdef IEM_WITH_SETJMP2908 2885 DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 2909 #endif2910 2886 2911 2887 void iemLogSyscallRealModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr); … … 3029 3005 VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT; 3030 3006 #endif 3031 #ifdef IEM_WITH_SETJMP3032 3007 uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 3033 3008 uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 3034 3009 uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 3035 3010 uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 3036 #else3037 VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT;3038 VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;3039 VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;3040 VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;3041 VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;3042 VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;3043 VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;3044 VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;3045 VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;3046 VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;3047 VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;3048 #endif3049 3011 3050 3012 VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT; … … 3066 3028 VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg, 3067 3029 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT; 3068 #ifdef IEM_WITH_SETJMP3069 3030 uint8_t iemMemFetchDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3070 3031 uint16_t iemMemFetchDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; … … 3083 3044 void iemMemFetchDataU256NoAcSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3084 3045 void iemMemFetchDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3085 # 3046 #if 0 /* these are inlined now */ 3086 3047 uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3087 3048 uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; … … 3097 3058 void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3098 3059 void iemMemFetchDataU256AlignedAvxJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3099 # 3060 #endif 3100 3061 void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3101 #endif3102 3062 3103 3063 VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT; … … 3120 3080 VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC 
pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT; 3121 3081 VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT; 3122 #ifdef IEM_WITH_SETJMP3123 3082 void iemMemStoreDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP; 3124 3083 void iemMemStoreDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP; … … 3133 3092 void iemMemStoreDataR80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTFLOAT80U pr80Value) IEM_NOEXCEPT_MAY_LONGJMP; 3134 3093 void iemMemStoreDataD80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTPBCD80U pd80Value) IEM_NOEXCEPT_MAY_LONGJMP; 3135 #if 0 3094 #if 0 /* inlined */ 3136 3095 void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP; 3137 3096 void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP; … … 3145 3104 void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP; 3146 3105 void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP; 3147 #endif 3148 3149 #ifdef IEM_WITH_SETJMP 3106 3150 3107 uint8_t *iemMemMapDataU8RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3151 3108 uint8_t *iemMemMapDataU8AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; … … 3174 3131 PRTUINT128U iemMemMapDataU128WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3175 3132 PCRTUINT128U iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3176 #endif3177 3133 3178 3134 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign, … … 3198 3154 VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT; 3199 3155 3200 #ifdef IEM_WITH_SETJMP3201 3156 void iemMemStackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP; 3202 3157 void iemMemStackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP; … … 3226 3181 uint32_t iemMemFetchStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3227 3182 uint64_t iemMemFetchStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 3228 3229 #endif3230 3183 3231 3184 /** @} */ -
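The prototype block above keeps the two flavours of every raiser side by side, a VBOXSTRICTRC-returning function and its DECL_NO_RETURN(...)Jmp sibling, both now declared unconditionally. Reduced to a standalone sketch with invented names and C++ exceptions standing in for the IEM unwind machinery, the pattern is:

#include <cstdio>

typedef int StatusCode;                      /* stand-in for VBOXSTRICTRC      */
static const StatusCode kGpFault = -2;       /* stand-in for a #GP status code */

/* Flavour 1: report the fault as a status code the caller must propagate. */
static StatusCode raiseGpFault0(void)
{
    return kGpFault;
}

/* Flavour 2: same fault, but reported by unwinding; never returns normally. */
[[noreturn]] static void raiseGpFault0Jmp(void)
{
    throw int(kGpFault);
}

/* A caller using the status-code flavour: every call site needs a check. */
static StatusCode doPrivilegedThingRc(bool fCpl0)
{
    if (!fCpl0)
        return raiseGpFault0();
    printf("privileged work done (rc flavour)\n");
    return 0;
}

/* A caller using the Jmp flavour: the error path simply vanishes here. */
static void doPrivilegedThingJmp(bool fCpl0)
{
    if (!fCpl0)
        raiseGpFault0Jmp();
    printf("privileged work done (jmp flavour)\n");
}

int main()
{
    printf("rc flavour: %d\n", doPrivilegedThingRc(false));
    try
    {
        doPrivilegedThingJmp(false);
    }
    catch (int rcThrown)
    {
        printf("jmp flavour unwound with %d\n", rcThrown);
    }
    return 0;
}

Roughly speaking, the Jmp flavour is what lets the inner decode and execute paths drop per-call status checks, while the status-returning flavour remains for callers that must not unwind.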
trunk/src/VBox/VMM/VMMAll/target-x86/IEMOpHlp-x86.h
r108261 r108278 654 654 == (pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT)) 655 655 656 VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT; 657 VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT; 658 #ifdef IEM_WITH_SETJMP 659 RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP; 660 RTGCPTR iemOpHlpCalcRmEffAddrJmpEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, uint64_t *puInfo) IEM_NOEXCEPT_MAY_LONGJMP; 661 #endif 656 RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP; 657 RTGCPTR iemOpHlpCalcRmEffAddrJmpEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, uint64_t *puInfo) IEM_NOEXCEPT_MAY_LONGJMP; 662 658 663 659 /** @} */ -
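iemOpHlpCalcRmEffAddrJmp/JmpEx, now the only flavours declared here, turn a ModR/M byte (plus any SIB byte and displacement) into a guest effective address. As a rough illustration of what such a helper computes, here is the classic 16-bit addressing-mode table only, with made-up register state and none of the segment, SIB, RIP-relative or fault handling of the real code:

#include <stdint.h>
#include <stdio.h>

/* Made-up 16-bit register file for the example. */
struct Regs16
{
    uint16_t bx, bp, si, di;
};

/*
 * 16-bit ModR/M effective address (mod != 3), displacement already fetched:
 *   rm: 0=BX+SI 1=BX+DI 2=BP+SI 3=BP+DI 4=SI 5=DI 6=BP (or disp16 if mod=0) 7=BX
 *   mod: 0 = no disp (except rm=6), 1 = sign-extended disp8, 2 = disp16
 */
static uint16_t calcEffAddr16(const struct Regs16 *pRegs, uint8_t bRm, int16_t disp)
{
    uint8_t const mod = (bRm >> 6) & 3;
    uint8_t const rm  = bRm & 7;
    uint16_t base;

    switch (rm)
    {
        case 0:  base = pRegs->bx + pRegs->si; break;
        case 1:  base = pRegs->bx + pRegs->di; break;
        case 2:  base = pRegs->bp + pRegs->si; break;
        case 3:  base = pRegs->bp + pRegs->di; break;
        case 4:  base = pRegs->si;             break;
        case 5:  base = pRegs->di;             break;
        case 6:  base = mod == 0 ? 0 : pRegs->bp; break; /* mod=0, rm=6: pure disp16 */
        default: base = pRegs->bx;             break;
    }
    if (mod == 0 && rm != 6)
        disp = 0;                   /* this encoding carries no displacement bytes */
    return (uint16_t)(base + (uint16_t)disp);
}

int main(void)
{
    struct Regs16 Regs = { /*bx*/ 0x1000, /*bp*/ 0x2000, /*si*/ 0x0010, /*di*/ 0x0020 };

    /* mod=01 rm=000: [BX+SI+disp8], disp8 = 4 -> 0x1014 */
    printf("[bx+si+4] = %#06x\n", (unsigned)calcEffAddr16(&Regs, 0x40, 4));
    /* mod=00 rm=110: [disp16], disp = 0x1234 -> 0x1234 */
    printf("[0x1234]  = %#06x\n", (unsigned)calcEffAddr16(&Regs, 0x06, 0x1234));
    return 0;
}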
trunk/src/VBox/VMM/include/IEMInline.h
r108260 r108278 351 351 */ 352 352 353 #ifdef IEM_WITH_SETJMP354 355 353 DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP 356 354 { 357 # 355 #if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) 358 356 if (RT_LIKELY(bMapInfo == 0)) 359 357 return; 360 # 358 #endif 361 359 iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo); 362 360 } … … 365 363 DECL_INLINE_THROW(void) iemMemCommitAndUnmapAtJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP 366 364 { 367 # 365 #if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) 368 366 if (RT_LIKELY(bMapInfo == 0)) 369 367 return; 370 # 368 #endif 371 369 iemMemCommitAndUnmapAtSafeJmp(pVCpu, bMapInfo); 372 370 } … … 375 373 DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP 376 374 { 377 # 375 #if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) 378 376 if (RT_LIKELY(bMapInfo == 0)) 379 377 return; 380 # 378 #endif 381 379 iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo); 382 380 } … … 385 383 DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP 386 384 { 387 # 385 #if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) 388 386 if (RT_LIKELY(bMapInfo == 0)) 389 387 return; 390 # 388 #endif 391 389 iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo); 392 390 } … … 394 392 DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT 395 393 { 396 # 394 #if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) 397 395 if (RT_LIKELY(bMapInfo == 0)) 398 396 return; 399 # 397 #endif 400 398 iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo); 401 399 } 402 403 #endif /* IEM_WITH_SETJMP */404 405 400 406 401 /** @} */ -
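The unmap wrappers above all share one trick: with IEM_WITH_DATA_TLB in ring-3, a zero bMapInfo means (reading the RT_LIKELY hint) that the access went straight through the TLB and there is nothing left to commit, so the inline wrapper returns immediately and only the rare buffered case reaches the out-of-line ...Safe... worker. A generic sketch of that inline-fast-path / out-of-line-slow-path split, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Out-of-line worker: handles the rare case that really has state to commit. */
static void commitAndUnmapSafe(uint8_t bMapInfo)
{
    printf("slow path: committing buffered data, bMapInfo=%#x\n", (unsigned)bMapInfo);
}

/* Inline wrapper: the overwhelmingly common case costs one compare and branch. */
static inline void commitAndUnmap(uint8_t bMapInfo)
{
    if (bMapInfo == 0)          /* likely: nothing was buffered, nothing to do */
        return;
    commitAndUnmapSafe(bMapInfo);
}

int main(void)
{
    commitAndUnmap(0);          /* fast path, no output             */
    commitAndUnmap(0x11);       /* slow path, calls the safe worker */
    return 0;
}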
trunk/src/VBox/VMM/include/IEMInternal-armv8.h
r108186 r108278 56 56 #endif 57 57 58 /** @def IEM_WITH_SETJMP59 * Enables alternative status code handling using setjmps.60 *61 * This adds a bit of expense via the setjmp() call since it saves all the62 * non-volatile registers. However, it eliminates return code checks and allows63 * for more optimal return value passing (return regs instead of stack buffer).64 */65 #if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 166 # define IEM_WITH_SETJMP67 #endif68 69 58 /** @def IEM_WITH_THROW_CATCH 70 59 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user 71 * mode code when IEM_WITH_SETJMP is in effect.60 * mode code. 72 61 * 73 62 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of … … 80 69 * Linux, but it should be quite a bit faster for normal code. 81 70 */ 82 #if (defined(I EM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \71 #if (defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \ 83 72 || defined(DOXYGEN_RUNNING) 84 73 # define IEM_WITH_THROW_CATCH … … 92 81 * @param a_rc The status code jump back with / throw. 93 82 */ 94 #if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING) 95 # ifdef IEM_WITH_THROW_CATCH 96 # define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc) 97 # else 98 # define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc)) 99 # endif 83 #ifdef IEM_WITH_THROW_CATCH 84 # define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc) 85 #else 86 # define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc)) 100 87 #endif 101 88 … … 130 117 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859 131 118 */ 132 #if defined( IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))119 #if defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH) 133 120 # define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT_EX(false) 134 121 #else … … 1222 1209 VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags, 1223 1210 uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT; 1224 #ifdef IEM_WITH_SETJMP1225 1211 DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, 1226 1212 uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP; 1227 #endif1228 1213 VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT; 1229 1214 VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT; 1230 1215 VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT; 1231 1216 VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT; 1232 #ifdef IEM_WITH_SETJMP1233 1217 DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP; 1234 #endif1235 1218 VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT; 1236 1219 VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT; 1237 #ifdef IEM_WITH_SETJMP1238 1220 DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP; 1239 #endif1240 1221 VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT; 1241 1222 … … 1287 1268 VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT; 1288 1269 VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR 
GCPtrMem) RT_NOEXCEPT; 1289 #ifdef IEM_WITH_SETJMP1290 1270 uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 1291 1271 uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; … … 1296 1276 void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 1297 1277 void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 1298 #endif1299 1278 1300 1279 VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT; … … 1309 1288 VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT; 1310 1289 VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT; 1311 #ifdef IEM_WITH_SETJMP1312 1290 void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP; 1313 1291 void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP; … … 1316 1294 void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP; 1317 1295 void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP; 1318 #endif1319 1296 1320 1297 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign, -
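With IEM_WITH_SETJMP gone as a knob, IEM_DO_LONGJMP above reduces to a choice between `throw int(rc)` (ring-3 builds with IEM_WITH_THROW_CATCH) and a classic longjmp through the saved jmp_buf. Seen from the failing code both behave the same; a small standalone comparison, simplified, with no VCpu state and invented function names:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf g_JmpBuf;                 /* stands in for the per-VCpu jump buffer */

/* longjmp flavour of IEM_DO_LONGJMP */
static void failViaLongjmp(int rc)
{
    longjmp(g_JmpBuf, rc);
}

/* throw/catch flavour of IEM_DO_LONGJMP */
static void failViaThrow(int rc)
{
    throw int(rc);
}

int main()
{
    /* setjmp flavour: the "try" is a setjmp that later returns the status. */
    int rc;
    if ((rc = setjmp(g_JmpBuf)) == 0)
        failViaLongjmp(-7);
    else
        printf("longjmp flavour caught rc=%d\n", rc);

    /* throw/catch flavour: same control flow expressed as a C++ exception. */
    try
    {
        failViaThrow(-7);
    }
    catch (int rcThrown)
    {
        printf("throw flavour caught rc=%d\n", rcThrown);
    }
    return 0;
}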
trunk/src/VBox/VMM/include/IEMInternal.h
r108226 r108278 65 65 #endif 66 66 67 /** @def IEM_WITH_SETJMP68 * Enables alternative status code handling using setjmps.69 *70 * This adds a bit of expense via the setjmp() call since it saves all the71 * non-volatile registers. However, it eliminates return code checks and allows72 * for more optimal return value passing (return regs instead of stack buffer).73 */74 #if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 175 # define IEM_WITH_SETJMP76 #endif77 78 67 /** @def IEM_WITH_THROW_CATCH 79 68 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user 80 * mode code when IEM_WITH_SETJMP is in effect.69 * mode code. 81 70 * 82 71 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of … … 89 78 * Linux, but it should be quite a bit faster for normal code. 90 79 */ 91 #if defined(__cplusplus) && defined(I EM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER)) /* ASM-NOINC-START */80 #if defined(__cplusplus) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER)) /* ASM-NOINC-START */ 92 81 # define IEM_WITH_THROW_CATCH 93 82 #endif /*ASM-NOINC-END*/ … … 183 172 * @param a_rc The status code jump back with / throw. 184 173 */ 185 #if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING) 186 # ifdef IEM_WITH_THROW_CATCH 187 # ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP 188 # define IEM_DO_LONGJMP(a_pVCpu, a_rc) do { \ 174 #ifdef IEM_WITH_THROW_CATCH 175 # ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP 176 # define IEM_DO_LONGJMP(a_pVCpu, a_rc) do { \ 189 177 if ((a_pVCpu)->iem.s.pvTbFramePointerR3) \ 190 178 iemNativeTbLongJmp((a_pVCpu)->iem.s.pvTbFramePointerR3, (a_rc)); \ 191 179 throw int(a_rc); \ 192 180 } while (0) 193 # else194 # define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)195 # endif196 181 # else 197 # define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))182 # define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc) 198 183 # endif 184 #else 185 # define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc)) 199 186 #endif 200 187 … … 229 216 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859 230 217 */ 231 #if defined( IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))218 #if defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH) 232 219 # define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT_EX(false) 233 220 #else … … 2537 2524 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT. 
2538 2525 */ 2539 #if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING) 2540 # ifdef IEM_WITH_THROW_CATCH 2541 # define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \ 2526 #ifdef IEM_WITH_THROW_CATCH 2527 # define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \ 2542 2528 a_rcTarget = VINF_SUCCESS; \ 2543 2529 try 2544 # 2530 # define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \ 2545 2531 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) 2546 # 2532 # define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \ 2547 2533 catch (int rcThrown) \ 2548 2534 { \ 2549 2535 a_rcTarget = rcThrown 2550 # 2536 # define IEM_CATCH_LONGJMP_END(a_pVCpu) \ 2551 2537 } \ 2552 2538 ((void)0) 2553 # 2554 # 2539 #else /* !IEM_WITH_THROW_CATCH */ 2540 # define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \ 2555 2541 jmp_buf JmpBuf; \ 2556 2542 jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \ 2557 2543 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \ 2558 2544 if ((rcStrict = setjmp(JmpBuf)) == 0) 2559 # 2545 # define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \ 2560 2546 pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \ 2561 2547 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \ 2562 2548 if ((rcStrict = setjmp(JmpBuf)) == 0) 2563 # 2549 # define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \ 2564 2550 else \ 2565 2551 { \ 2566 2552 ((void)0) 2567 # 2553 # define IEM_CATCH_LONGJMP_END(a_pVCpu) \ 2568 2554 } \ 2569 2555 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf 2570 # endif /* !IEM_WITH_THROW_CATCH */ 2571 #endif /* IEM_WITH_SETJMP */ 2556 #endif /* !IEM_WITH_THROW_CATCH */ 2572 2557 2573 2558 … … 3171 3156 void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT; 3172 3157 3173 #ifdef IEM_WITH_SETJMP3174 3158 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 3175 3159 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; … … 3178 3162 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 3179 3163 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT; 3180 #endif3181 3164 3182 3165 void iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT; -
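The IEM_TRY_SETJMP / IEM_CATCH_LONGJMP_BEGIN / IEM_CATCH_LONGJMP_END quartet above is what the executor loops wrap around the per-instruction call. Stripped of the VCpu plumbing, the throw/catch expansion amounts to the following standalone sketch; the macro names, the caller and the failing "instruction" are invented:

#include <cstdio>

/* Simplified stand-ins for the throw/catch expansion of the macros above. */
#define MY_TRY_SETJMP(a_rcTarget) \
    a_rcTarget = 0; \
    try

#define MY_CATCH_LONGJMP_BEGIN(a_rcTarget) \
    catch (int rcThrown) \
    { \
        a_rcTarget = rcThrown

#define MY_CATCH_LONGJMP_END() \
    } \
    ((void)0)

/* An 'instruction' that reports a fault by unwinding instead of returning. */
static void executeOneInstruction(bool fFault)
{
    if (fFault)
        throw int(-55);
    printf("instruction retired\n");
}

static int runLoop(bool fFault)
{
    int rcStrict;
    MY_TRY_SETJMP(rcStrict)
    {
        executeOneInstruction(fFault);
    }
    MY_CATCH_LONGJMP_BEGIN(rcStrict);
    {
        printf("unwound, rolling back mappings\n");
    }
    MY_CATCH_LONGJMP_END();
    return rcStrict;
}

int main()
{
    printf("clean run    -> rc=%d\n", runLoop(false));
    printf("faulting run -> rc=%d\n", runLoop(true));
    return 0;
}

The setjmp flavour of the same macros, also quoted above, keeps the identical call-site shape: the "try" becomes a setjmp and the "catch" becomes the non-zero setjmp return, which is why the executor loops in IEMAll.cpp do not care which flavour was compiled in.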
trunk/src/VBox/VMM/include/IEMMc.h
r108269 r108278 970 970 do { IEM_MC_INT_CLEAR_ZMM_256_UP(a_iYReg); } while (0) 971 971 972 #ifndef IEM_WITH_SETJMP 973 # define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \ 974 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem))) 975 # define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \ 976 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16))) 977 # define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \ 978 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32))) 979 #else 980 # define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \ 972 #define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \ 981 973 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 982 # 974 #define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \ 983 975 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16))) 984 # 976 #define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \ 985 977 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32))) 986 978 987 # 979 #define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \ 988 980 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem))) 989 # 981 #define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \ 990 982 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem16))) 991 # 983 #define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \ 992 984 ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem32))) 993 #endif 994 995 #ifndef IEM_WITH_SETJMP 996 # define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 997 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem))) 998 # define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 999 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 1000 # define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \ 1001 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem))) 1002 # define IEM_MC_FETCH_MEM_I16_DISP(a_i16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 1003 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 1004 #else 1005 # define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 985 986 #define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 1006 987 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1007 # 988 #define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 1008 989 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 1009 # 990 #define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \ 1010 991 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1011 # 992 #define IEM_MC_FETCH_MEM_I16_DISP(a_i16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 1012 993 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 1013 994 1014 # 995 #define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \ 1015 996 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem))) 1016 # 997 #define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \ 1017 998 ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp))) 1018 # 999 #define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \ 1019 1000 ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem))) 1020 # 1001 #define 
IEM_MC_FETCH_MEM_FLAT_I16_DISP(a_i16Dst, a_GCPtrMem, a_offDisp) \ 1021 1002 ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp))) 1022 #endif 1023 1024 #ifndef IEM_WITH_SETJMP 1025 # define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1026 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem))) 1027 # define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 1028 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 1029 # define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \ 1030 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem))) 1031 # define IEM_MC_FETCH_MEM_I32_DISP(a_i32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 1032 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 1033 #else 1034 # define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1003 1004 #define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1035 1005 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1036 # 1006 #define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 1037 1007 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 1038 # 1008 #define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \ 1039 1009 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1040 # 1010 #define IEM_MC_FETCH_MEM_I32_DISP(a_i32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 1041 1011 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 1042 1012 1043 # 1013 #define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \ 1044 1014 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem))) 1045 # 1015 #define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \ 1046 1016 ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp))) 1047 # 1017 #define IEM_MC_FETCH_MEM_FLAT_I32(a_i32Dst, a_GCPtrMem) \ 1048 1018 ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem))) 1049 # 1019 #define IEM_MC_FETCH_MEM_FLAT_I32_DISP(a_i32Dst, a_GCPtrMem, a_offDisp) \ 1050 1020 ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp))) 1051 #endif 1052 1053 #ifndef IEM_WITH_SETJMP 1054 # define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1055 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem))) 1056 # define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 1057 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 1058 # define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1059 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem))) 1060 # define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \ 1061 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem))) 1062 #else 1063 # define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1021 1022 #define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1064 1023 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1065 # 1024 #define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 1066 1025 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 1067 # 1026 #define 
IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1068 1027 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1069 # 1028 #define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \ 1070 1029 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1071 1030 1072 # 1031 #define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \ 1073 1032 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem))) 1074 # 1033 #define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \ 1075 1034 ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp))) 1076 # 1035 #define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \ 1077 1036 ((a_u64Dst) = iemMemFlatFetchDataU64AlignedU128Jmp(pVCpu, (a_GCPtrMem))) 1078 # 1037 #define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \ 1079 1038 ((a_i64Dst) = (int64_t)iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem))) 1080 #endif 1081 1082 #ifndef IEM_WITH_SETJMP 1083 # define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \ 1084 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u, (a_iSeg), (a_GCPtrMem))) 1085 # define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \ 1086 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).u, (a_iSeg), (a_GCPtrMem))) 1087 # define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \ 1088 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))) 1089 # define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \ 1090 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataD80(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))) 1091 #else 1092 # define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \ 1039 1040 #define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \ 1093 1041 ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1094 # 1042 #define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \ 1095 1043 ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1096 # 1044 #define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \ 1097 1045 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)) 1098 # 1046 #define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \ 1099 1047 iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem)) 1100 1048 1101 # 1049 #define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \ 1102 1050 ((a_r32Dst).u = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem))) 1103 # 1051 #define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \ 1104 1052 ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem))) 1105 # 1053 #define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \ 1106 1054 iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem)) 1107 # 1055 #define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \ 1108 1056 iemMemFlatFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_GCPtrMem)) 1109 #endif 1110 1111 #ifndef IEM_WITH_SETJMP 1112 # define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \ 1113 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))) 1114 # define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \ 1115 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))) 1116 # define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \ 1117 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))) 1118 1119 # define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \ 
1120 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))) 1121 # define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \ 1122 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))) 1123 1124 # define IEM_MC_FETCH_MEM_U128_NO_AC_AND_XREG_U128(a_u128Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \ 1125 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \ 1126 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ 1127 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \ 1128 } while (0) 1129 1130 # define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \ 1131 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2))); \ 1132 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ 1133 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \ 1134 } while (0) 1135 1136 # define IEM_MC_FETCH_MEM_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \ 1137 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2))); \ 1138 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ 1139 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \ 1140 } while (0) 1141 1142 # define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do { \ 1143 (a_Dst).uSrc2.uXmm.au64[0] = 0; \ 1144 (a_Dst).uSrc2.uXmm.au64[1] = 0; \ 1145 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_Dst).uSrc2.uXmm.au32[(a_iDWord2)], (a_iSeg2), (a_GCPtrMem2))); \ 1146 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ 1147 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \ 1148 } while (0) 1149 1150 # define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do { \ 1151 (a_Dst).uSrc2.uXmm.au64[1] = 0; \ 1152 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_Dst).uSrc2.uXmm.au64[(a_iQWord2)], (a_iSeg2), (a_GCPtrMem2))); \ 1153 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ 1154 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \ 1155 } while (0) 1156 1157 # define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \ 1158 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \ 1159 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ 1160 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \ 1161 (a_Dst).u64Rax = pVCpu->cpum.GstCtx.rax; \ 1162 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \ 1163 } while (0) 1164 # define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \ 1165 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \ 1166 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ 1167 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \ 1168 (a_Dst).u64Rax = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \ 1169 (a_Dst).u64Rdx = 
(int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \ 1170 } while (0) 1171 1172 #else 1173 # define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \ 1057 1058 #define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \ 1174 1059 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)) 1175 # 1060 #define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \ 1176 1061 iemMemFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)) 1177 # 1062 #define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \ 1178 1063 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)) 1179 1064 1180 # 1065 #define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \ 1181 1066 iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)) 1182 # 1067 #define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \ 1183 1068 iemMemFetchDataU128NoAcJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)) 1184 # 1069 #define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \ 1185 1070 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)) 1186 1071 1187 # 1072 #define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \ 1188 1073 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem)) 1189 # 1074 #define IEM_MC_FETCH_MEM_FLAT_U128_NO_AC(a_u128Dst, a_GCPtrMem) \ 1190 1075 iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem)) 1191 # 1076 #define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \ 1192 1077 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem)) 1193 1078 1194 # 1079 #define IEM_MC_FETCH_MEM_FLAT_XMM(a_XmmDst, a_GCPtrMem) \ 1195 1080 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem)) 1196 # 1081 #define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC(a_XmmDst, a_GCPtrMem) \ 1197 1082 iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem)) 1198 # 1083 #define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE(a_XmmDst, a_GCPtrMem) \ 1199 1084 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem)) 1200 1085 1201 # 1086 #define IEM_MC_FETCH_MEM_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \ 1202 1087 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \ 1203 1088 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ 1204 1089 (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \ 1205 1090 } while (0) 1206 # 1091 #define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_GCPtrMem2) do { \ 1207 1092 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \ 1208 1093 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ … … 1210 1095 } while (0) 1211 1096 1212 # 1097 #define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \ 1213 1098 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \ 1214 1099 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ … … 1216 1101 } while (0) 1217 1102 1218 # 1103 #define IEM_MC_FETCH_MEM_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \ 1219 1104 iemMemFetchDataU128NoAcJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \ 1220 1105 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ … … 1222 1107 } while (0) 1223 1108 1224 # 1109 #define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, 
a_iXReg1, a_GCPtrMem2) do { \ 1225 1110 iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_GCPtrMem2)); \ 1226 1111 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ … … 1228 1113 } while (0) 1229 1114 1230 # 1115 #define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \ 1231 1116 iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_GCPtrMem2)); \ 1232 1117 (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ … … 1234 1119 } while (0) 1235 1120 1236 # 1121 #define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do { \ 1237 1122 (a_Dst).uSrc2.uXmm.au64[0] = 0; \ 1238 1123 (a_Dst).uSrc2.uXmm.au64[1] = 0; \ … … 1241 1126 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \ 1242 1127 } while (0) 1243 # 1128 #define IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_GCPtrMem2) do { \ 1244 1129 (a_Dst).uSrc2.uXmm.au64[0] = 0; \ 1245 1130 (a_Dst).uSrc2.uXmm.au64[1] = 0; \ … … 1249 1134 } while (0) 1250 1135 1251 # 1136 #define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do { \ 1252 1137 (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \ 1253 1138 (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \ … … 1255 1140 (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \ 1256 1141 } while (0) 1257 # 1142 #define IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_GCPtrMem2) do { \ 1258 1143 (a_Dst).uSrc2.uXmm.au64[1] = 0; \ 1259 1144 (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)] = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem2)); \ … … 1263 1148 1264 1149 1265 # 1150 #define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \ 1266 1151 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \ 1267 1152 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ … … 1270 1155 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \ 1271 1156 } while (0) 1272 # 1157 #define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \ 1273 1158 iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \ 1274 1159 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ … … 1278 1163 } while (0) 1279 1164 1280 # 1165 #define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \ 1281 1166 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \ 1282 1167 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ … … 1285 1170 (a_Dst).u64Rdx = pVCpu->cpum.GstCtx.rdx; \ 1286 1171 } while (0) 1287 # 1172 #define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \ 1288 1173 iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \ 1289 1174 (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \ … … 1293 1178 } while (0) 1294 1179 1295 #endif 1296 1297 #ifndef IEM_WITH_SETJMP 1298 # define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \ 1299 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))) 1300 # define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \ 1301 
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))) 1302 # define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \ 1303 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedAvx(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))) 1304 1305 # define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \ 1306 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))) 1307 # define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \ 1308 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))) 1309 # define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \ 1310 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedAvx(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))) 1311 1312 # define IEM_MC_FETCH_MEM_YMM_NO_AC_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_iSeg2, a_GCPtrMem2) do { \ 1313 uintptr_t const a_iYRegSrc1Tmp = (a_iYRegSrc1); \ 1314 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_uYmmDst).uSrc2.ymm, (a_iSeg2), (a_GCPtrMem2))); \ 1315 (a_uYmmDst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[a_iYRegSrc1Tmp].au64[0]; \ 1316 (a_uYmmDst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[a_iYRegSrc1Tmp].au64[1]; \ 1317 (a_uYmmDst).uSrc1.au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iYRegSrc1Tmp].au64[0]; \ 1318 (a_uYmmDst).uSrc1.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iYRegSrc1Tmp].au64[1]; \ 1319 } while (0) 1320 1321 #else 1322 # define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \ 1180 1181 #define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \ 1323 1182 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)) 1324 # 1183 #define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \ 1325 1184 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)) 1326 # 1185 #define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \ 1327 1186 iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)) 1328 1187 1329 # 1188 #define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \ 1330 1189 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)) 1331 # 1190 #define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \ 1332 1191 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)) 1333 # 1192 #define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \ 1334 1193 iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)) 1335 1194 1336 # 1195 #define IEM_MC_FETCH_MEM_YMM_NO_AC_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_iSeg2, a_GCPtrMem2) do { \ 1337 1196 uintptr_t const a_iYRegSrc1Tmp = (a_iYRegSrc1); \ 1338 1197 iemMemFetchDataU256NoAcJmp(pVCpu, &(a_uYmmDst).uSrc2.ymm, (a_iSeg2), (a_GCPtrMem2)); \ … … 1343 1202 } while (0) 1344 1203 1345 # 1204 #define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \ 1346 1205 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem)) 1347 # 1206 #define IEM_MC_FETCH_MEM_FLAT_U256_NO_AC(a_u256Dst, a_GCPtrMem) \ 1348 1207 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem)) 1349 # 1208 #define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \ 1350 1209 iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem)) 1351 1210 1352 # 1211 #define IEM_MC_FETCH_MEM_FLAT_YMM(a_YmmDst, a_GCPtrMem) \ 1353 1212 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem)) 1354 # 1213 #define 
IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC(a_YmmDst, a_GCPtrMem) \ 1355 1214 iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem)) 1356 # 1215 #define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX(a_YmmDst, a_GCPtrMem) \ 1357 1216 iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem)) 1358 1217 1359 # 1218 #define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_GCPtrMem2) do { \ 1360 1219 uintptr_t const a_iYRegSrc1Tmp = (a_iYRegSrc1); \ 1361 1220 iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_uYmmDst).uSrc2.ymm, (a_GCPtrMem2)); \ … … 1366 1225 } while (0) 1367 1226 1368 #endif 1369 1370 1371 1372 #ifndef IEM_WITH_SETJMP 1373 # define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 1374 do { \ 1375 uint8_t u8Tmp; \ 1376 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 1377 (a_u16Dst) = u8Tmp; \ 1378 } while (0) 1379 # define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1380 do { \ 1381 uint8_t u8Tmp; \ 1382 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 1383 (a_u32Dst) = u8Tmp; \ 1384 } while (0) 1385 # define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1386 do { \ 1387 uint8_t u8Tmp; \ 1388 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 1389 (a_u64Dst) = u8Tmp; \ 1390 } while (0) 1391 # define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1392 do { \ 1393 uint16_t u16Tmp; \ 1394 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \ 1395 (a_u32Dst) = u16Tmp; \ 1396 } while (0) 1397 # define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1398 do { \ 1399 uint16_t u16Tmp; \ 1400 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \ 1401 (a_u64Dst) = u16Tmp; \ 1402 } while (0) 1403 # define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1404 do { \ 1405 uint32_t u32Tmp; \ 1406 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \ 1407 (a_u64Dst) = u32Tmp; \ 1408 } while (0) 1409 #else /* IEM_WITH_SETJMP */ 1410 # define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 1227 1228 1229 #define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 1411 1230 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1412 # 1231 #define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1413 1232 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1414 # 1233 #define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1415 1234 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1416 # 1235 #define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1417 1236 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1418 # 1237 #define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1419 1238 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1420 # 1239 #define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1421 1240 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1422 1241 1423 # 1242 #define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \ 1424 1243 ((a_u16Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem))) 1425 # 1244 #define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \ 1426 1245 ((a_u32Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem))) 1427 # 1246 #define 
IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \ 1428 1247 ((a_u64Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem))) 1429 # 1248 #define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \ 1430 1249 ((a_u32Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem))) 1431 # 1250 #define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \ 1432 1251 ((a_u64Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem))) 1433 # 1252 #define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \ 1434 1253 ((a_u64Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem))) 1435 #endif /* IEM_WITH_SETJMP */ 1436 1437 #ifndef IEM_WITH_SETJMP 1438 # define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 1439 do { \ 1440 uint8_t u8Tmp; \ 1441 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 1442 (a_u16Dst) = (int8_t)u8Tmp; \ 1443 } while (0) 1444 # define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1445 do { \ 1446 uint8_t u8Tmp; \ 1447 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 1448 (a_u32Dst) = (int8_t)u8Tmp; \ 1449 } while (0) 1450 # define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1451 do { \ 1452 uint8_t u8Tmp; \ 1453 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 1454 (a_u64Dst) = (int8_t)u8Tmp; \ 1455 } while (0) 1456 # define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1457 do { \ 1458 uint16_t u16Tmp; \ 1459 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \ 1460 (a_u32Dst) = (int16_t)u16Tmp; \ 1461 } while (0) 1462 # define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1463 do { \ 1464 uint16_t u16Tmp; \ 1465 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \ 1466 (a_u64Dst) = (int16_t)u16Tmp; \ 1467 } while (0) 1468 # define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1469 do { \ 1470 uint32_t u32Tmp; \ 1471 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \ 1472 (a_u64Dst) = (int32_t)u32Tmp; \ 1473 } while (0) 1474 #else /* IEM_WITH_SETJMP */ 1475 # define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 1254 1255 #define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 1476 1256 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1477 # 1257 #define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1478 1258 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1479 # 1259 #define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1480 1260 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1481 # 1261 #define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 1482 1262 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1483 # 1263 #define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1484 1264 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1485 # 1265 #define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 1486 1266 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 1487 1267 1488 # 1268 #define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \ 1489 1269 ((a_u16Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem))) 1490 # 1270 #define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \ 1491 1271 ((a_u32Dst) = 
(int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem))) 1492 # 1272 #define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \ 1493 1273 ((a_u64Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem))) 1494 # 1274 #define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \ 1495 1275 ((a_u32Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem))) 1496 # 1276 #define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \ 1497 1277 ((a_u64Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem))) 1498 # 1278 #define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \ 1499 1279 ((a_u64Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem))) 1500 #endif /* IEM_WITH_SETJMP */ 1501 1502 #ifndef IEM_WITH_SETJMP 1503 # define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \ 1504 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))) 1505 # define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \ 1506 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))) 1507 # define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \ 1508 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))) 1509 # define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \ 1510 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))) 1511 #else 1512 # define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \ 1280 1281 #define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \ 1513 1282 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)) 1514 # 1283 #define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \ 1515 1284 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)) 1516 # 1285 #define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \ 1517 1286 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)) 1518 # 1287 #define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \ 1519 1288 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)) 1520 1289 1521 # 1290 #define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \ 1522 1291 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8Value)) 1523 # 1292 #define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \ 1524 1293 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16Value)) 1525 # 1294 #define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \ 1526 1295 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32Value)) 1527 # 1296 #define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \ 1528 1297 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64Value)) 1529 #endif 1530 1531 #ifndef IEM_WITH_SETJMP 1532 # define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \ 1533 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))) 1534 # define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \ 1535 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))) 1536 # define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \ 1537 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))) 1538 # define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \ 1539 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))) 1540 #else 1541 # define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \ 1298 1299 #define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \ 1542 1300 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), 
(a_GCPtrMem), (a_u8C)) 1543 # 1301 #define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \ 1544 1302 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)) 1545 # 1303 #define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \ 1546 1304 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)) 1547 # 1305 #define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \ 1548 1306 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)) 1549 1307 1550 # 1308 #define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \ 1551 1309 iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8C)) 1552 # 1310 #define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \ 1553 1311 iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16C)) 1554 # 1312 #define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \ 1555 1313 iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32C)) 1556 # 1314 #define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \ 1557 1315 iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64C)) 1558 #endif1559 1316 1560 1317 #define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C) … … 1575 1332 } while (0) 1576 1333 1577 #ifndef IEM_WITH_SETJMP 1578 # define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \ 1579 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))) 1580 # define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \ 1581 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128NoAc(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))) 1582 # define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \ 1583 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))) 1584 #else 1585 # define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \ 1334 #define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \ 1586 1335 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value)) 1587 # 1336 #define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \ 1588 1337 iemMemStoreDataU128NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value)) 1589 # 1338 #define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \ 1590 1339 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)) 1591 1340 1592 # 1341 #define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \ 1593 1342 iemMemFlatStoreDataU128Jmp(pVCpu, (a_GCPtrMem), &(a_u128Value)) 1594 # 1343 #define IEM_MC_STORE_MEM_FLAT_U128_NO_AC(a_GCPtrMem, a_u128Value) \ 1595 1344 iemMemFlatStoreDataU128NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u128Value)) 1596 # 1345 #define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \ 1597 1346 iemMemStoreDataU128AlignedSseJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u128Value)) 1598 #endif 1599 1600 #ifndef IEM_WITH_SETJMP 1601 # define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \ 1602 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))) 1603 # define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \ 1604 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256NoAc(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))) 1605 # define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \ 1606 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))) 1607 #else 1608 # define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \ 1347 1348 #define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, 
a_u256Value) \ 1609 1349 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)) 1610 # 1350 #define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \ 1611 1351 iemMemStoreDataU256NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)) 1612 # 1352 #define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \ 1613 1353 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)) 1614 1354 1615 # 1355 #define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \ 1616 1356 iemMemFlatStoreDataU256Jmp(pVCpu, (a_GCPtrMem), &(a_u256Value)) 1617 # 1357 #define IEM_MC_STORE_MEM_FLAT_U256_NO_AC(a_GCPtrMem, a_u256Value) \ 1618 1358 iemMemFlatStoreDataU256NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u256Value)) 1619 # 1359 #define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \ 1620 1360 iemMemFlatStoreDataU256AlignedAvxJmp(pVCpu, (a_GCPtrMem), &(a_u256Value)) 1621 #endif1622 1361 1623 1362 /* Regular stack push and pop: */ 1624 #ifndef IEM_WITH_SETJMP 1625 # define IEM_MC_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value))) 1626 # define IEM_MC_PUSH_U32(a_u32Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value))) 1627 # define IEM_MC_PUSH_U32_SREG(a_uSegVal) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal))) 1628 # define IEM_MC_PUSH_U64(a_u64Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value))) 1629 1630 # define IEM_MC_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg))) 1631 # define IEM_MC_POP_GREG_U32(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg))) 1632 # define IEM_MC_POP_GREG_U64(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg))) 1633 #else 1634 # define IEM_MC_PUSH_U16(a_u16Value) iemMemStackPushU16Jmp(pVCpu, (a_u16Value)) 1635 # define IEM_MC_PUSH_U32(a_u32Value) iemMemStackPushU32Jmp(pVCpu, (a_u32Value)) 1636 # define IEM_MC_PUSH_U32_SREG(a_uSegVal) iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal)) 1637 # define IEM_MC_PUSH_U64(a_u64Value) iemMemStackPushU64Jmp(pVCpu, (a_u64Value)) 1638 1639 # define IEM_MC_POP_GREG_U16(a_iGReg) iemMemStackPopGRegU16Jmp(pVCpu, (a_iGReg)) 1640 # define IEM_MC_POP_GREG_U32(a_iGReg) iemMemStackPopGRegU32Jmp(pVCpu, (a_iGReg)) 1641 # define IEM_MC_POP_GREG_U64(a_iGReg) iemMemStackPopGRegU64Jmp(pVCpu, (a_iGReg)) 1642 #endif 1363 #define IEM_MC_PUSH_U16(a_u16Value) iemMemStackPushU16Jmp(pVCpu, (a_u16Value)) 1364 #define IEM_MC_PUSH_U32(a_u32Value) iemMemStackPushU32Jmp(pVCpu, (a_u32Value)) 1365 #define IEM_MC_PUSH_U32_SREG(a_uSegVal) iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal)) 1366 #define IEM_MC_PUSH_U64(a_u64Value) iemMemStackPushU64Jmp(pVCpu, (a_u64Value)) 1367 1368 #define IEM_MC_POP_GREG_U16(a_iGReg) iemMemStackPopGRegU16Jmp(pVCpu, (a_iGReg)) 1369 #define IEM_MC_POP_GREG_U32(a_iGReg) iemMemStackPopGRegU32Jmp(pVCpu, (a_iGReg)) 1370 #define IEM_MC_POP_GREG_U64(a_iGReg) iemMemStackPopGRegU64Jmp(pVCpu, (a_iGReg)) 1643 1371 1644 1372 /* 32-bit flat stack push and pop: */ 1645 #ifndef IEM_WITH_SETJMP 1646 # define IEM_MC_FLAT32_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value))) 1647 # define IEM_MC_FLAT32_PUSH_U32(a_u32Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value))) 1648 # define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal))) 1649 1650 # define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) 
IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg))) 1651 # define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg))) 1652 #else 1653 # define IEM_MC_FLAT32_PUSH_U16(a_u16Value) iemMemFlat32StackPushU16Jmp(pVCpu, (a_u16Value)) 1654 # define IEM_MC_FLAT32_PUSH_U32(a_u32Value) iemMemFlat32StackPushU32Jmp(pVCpu, (a_u32Value)) 1655 # define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) iemMemFlat32StackPushU32SRegJmp(pVCpu, (a_uSegVal)) 1656 1657 # define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) iemMemFlat32StackPopGRegU16Jmp(pVCpu, a_iGReg)) 1658 # define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) iemMemFlat32StackPopGRegU32Jmp(pVCpu, a_iGReg)) 1659 #endif 1373 #define IEM_MC_FLAT32_PUSH_U16(a_u16Value) iemMemFlat32StackPushU16Jmp(pVCpu, (a_u16Value)) 1374 #define IEM_MC_FLAT32_PUSH_U32(a_u32Value) iemMemFlat32StackPushU32Jmp(pVCpu, (a_u32Value)) 1375 #define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) iemMemFlat32StackPushU32SRegJmp(pVCpu, (a_uSegVal)) 1376 1377 #define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg) iemMemFlat32StackPopGRegU16Jmp(pVCpu, a_iGReg)) 1378 #define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg) iemMemFlat32StackPopGRegU32Jmp(pVCpu, a_iGReg)) 1660 1379 1661 1380 /* 64-bit flat stack push and pop: */ 1662 #ifndef IEM_WITH_SETJMP 1663 # define IEM_MC_FLAT64_PUSH_U16(a_u16Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value))) 1664 # define IEM_MC_FLAT64_PUSH_U64(a_u64Value) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value))) 1665 1666 # define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg))) 1667 # define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg))) 1668 #else 1669 # define IEM_MC_FLAT64_PUSH_U16(a_u16Value) iemMemFlat64StackPushU16Jmp(pVCpu, (a_u16Value)) 1670 # define IEM_MC_FLAT64_PUSH_U64(a_u64Value) iemMemFlat64StackPushU64Jmp(pVCpu, (a_u64Value)) 1671 1672 # define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg)) 1673 # define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg)) 1674 #endif 1381 #define IEM_MC_FLAT64_PUSH_U16(a_u16Value) iemMemFlat64StackPushU16Jmp(pVCpu, (a_u16Value)) 1382 #define IEM_MC_FLAT64_PUSH_U64(a_u64Value) iemMemFlat64StackPushU64Jmp(pVCpu, (a_u64Value)) 1383 1384 #define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg) iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg)) 1385 #define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg) iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg)) 1675 1386 1676 1387 … … 1688 1399 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 1689 1400 */ 1690 #ifndef IEM_WITH_SETJMP 1691 # define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1692 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \ 1693 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0)) 1694 #else 1695 # define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1401 #define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1696 1402 (a_pu8Mem) = iemMemMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1697 #endif1698 1403 1699 1404 /** … … 1707 1412 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW 1708 1413 */ 1709 #ifndef IEM_WITH_SETJMP 1710 # define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1711 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \ 1712 
(a_GCPtrMem), IEM_ACCESS_DATA_RW, 0)) 1713 #else 1714 # define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1414 #define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1715 1415 (a_pu8Mem) = iemMemMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1716 #endif1717 1416 1718 1417 /** … … 1726 1425 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 1727 1426 */ 1728 #ifndef IEM_WITH_SETJMP 1729 # define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1730 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \ 1731 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0)) 1732 #else 1733 # define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1427 #define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1734 1428 (a_pu8Mem) = iemMemMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1735 #endif1736 1429 1737 1430 /** … … 1745 1438 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO 1746 1439 */ 1747 #ifndef IEM_WITH_SETJMP 1748 # define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1749 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \ 1750 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0)) 1751 #else 1752 # define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1440 #define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1753 1441 (a_pu8Mem) = iemMemMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1754 #endif1755 1442 1756 1443 /** … … 1764 1451 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 1765 1452 */ 1766 #ifndef IEM_WITH_SETJMP 1767 # define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1768 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \ 1769 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0)) 1770 #else 1771 # define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1453 #define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1772 1454 (a_pu8Mem) = iemMemFlatMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1773 #endif1774 1455 1775 1456 /** … … 1783 1464 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW 1784 1465 */ 1785 #ifndef IEM_WITH_SETJMP 1786 # define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1787 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \ 1788 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0)) 1789 #else 1790 # define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1466 #define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1791 1467 (a_pu8Mem) = iemMemFlatMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1792 #endif1793 1468 1794 1469 /** … … 1802 1477 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 1803 1478 */ 1804 #ifndef IEM_WITH_SETJMP 1805 # define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1806 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \ 1807 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0)) 1808 #else 1809 # define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1479 #define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1810 1480 (a_pu8Mem) = iemMemFlatMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1811 #endif1812 1481 1813 1482 /** … … 1821 1490 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO 1822 1491 */ 1823 #ifndef 
IEM_WITH_SETJMP 1824 # define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1825 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \ 1826 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0)) 1827 #else 1828 # define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1492 #define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1829 1493 (a_pu8Mem) = iemMemFlatMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1830 #endif1831 1494 1832 1495 … … 1843 1506 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 1844 1507 */ 1845 #ifndef IEM_WITH_SETJMP 1846 # define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1847 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \ 1848 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1)) 1849 #else 1850 # define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1508 #define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1851 1509 (a_pu16Mem) = iemMemMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1852 #endif1853 1510 1854 1511 /** … … 1862 1519 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW 1863 1520 */ 1864 #ifndef IEM_WITH_SETJMP 1865 # define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1866 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \ 1867 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1)) 1868 #else 1869 # define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1521 #define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1870 1522 (a_pu16Mem) = iemMemMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1871 #endif1872 1523 1873 1524 /** … … 1881 1532 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 1882 1533 */ 1883 #ifndef IEM_WITH_SETJMP 1884 # define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1885 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \ 1886 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1)) 1887 #else 1888 # define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1534 #define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1889 1535 (a_pu16Mem) = iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1890 #endif1891 1536 1892 1537 /** … … 1900 1545 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO 1901 1546 */ 1902 #ifndef IEM_WITH_SETJMP 1903 # define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1904 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \ 1905 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1)) 1906 #else 1907 # define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1547 #define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1908 1548 (a_pu16Mem) = iemMemMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1909 #endif1910 1549 1911 1550 /** … … 1919 1558 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 1920 1559 */ 1921 #ifndef IEM_WITH_SETJMP 1922 # define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1923 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \ 1924 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 
sizeof(uint16_t) - 1)) 1925 #else 1926 # define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1560 #define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1927 1561 (a_pu16Mem) = iemMemFlatMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1928 #endif1929 1562 1930 1563 /** … … 1938 1571 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW 1939 1572 */ 1940 #ifndef IEM_WITH_SETJMP 1941 # define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1942 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \ 1943 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1)) 1944 #else 1945 # define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1573 #define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1946 1574 (a_pu16Mem) = iemMemFlatMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1947 #endif1948 1575 1949 1576 /** … … 1957 1584 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 1958 1585 */ 1959 #ifndef IEM_WITH_SETJMP 1960 # define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1961 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \ 1962 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1)) 1963 #else 1964 # define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1586 #define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1965 1587 (a_pu16Mem) = iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1966 #endif1967 1588 1968 1589 /** … … 1976 1597 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO 1977 1598 */ 1978 #ifndef IEM_WITH_SETJMP 1979 # define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1980 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \ 1981 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1)) 1982 #else 1983 # define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1599 #define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1984 1600 (a_pu16Mem) = iemMemFlatMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1985 #endif1986 1601 1987 1602 /** int16_t alias. */ 1988 #ifndef IEM_WITH_SETJMP 1989 # define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1990 IEM_MC_MEM_MAP_U16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) 1991 #else 1992 # define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1603 #define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1993 1604 (a_pi16Mem) = (int16_t *)iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1994 #endif1995 1605 1996 1606 /** Flat int16_t alias. 
*/ 1997 #ifndef IEM_WITH_SETJMP 1998 # define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1999 IEM_MC_MEM_FLAT_MAP_U16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) 2000 #else 2001 # define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1607 #define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \ 2002 1608 (a_pi16Mem) = (int16_t *)iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2003 #endif2004 1609 2005 1610 … … 2016 1621 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 2017 1622 */ 2018 #ifndef IEM_WITH_SETJMP 2019 # define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2020 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \ 2021 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1)) 2022 #else 2023 # define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1623 #define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2024 1624 (a_pu32Mem) = iemMemMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2025 #endif2026 1625 2027 1626 /** … … 2035 1634 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW 2036 1635 */ 2037 #ifndef IEM_WITH_SETJMP 2038 # define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2039 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \ 2040 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1)) 2041 #else 2042 # define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1636 #define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2043 1637 (a_pu32Mem) = iemMemMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2044 #endif2045 1638 2046 1639 /** … … 2054 1647 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2055 1648 */ 2056 #ifndef IEM_WITH_SETJMP 2057 # define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2058 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \ 2059 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1)) 2060 #else 2061 # define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1649 #define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2062 1650 (a_pu32Mem) = iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2063 #endif2064 1651 2065 1652 /** … … 2073 1660 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO 2074 1661 */ 2075 #ifndef IEM_WITH_SETJMP 2076 # define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2077 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \ 2078 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1)) 2079 #else 2080 # define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1662 #define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2081 1663 (a_pu32Mem) = iemMemMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2082 #endif2083 1664 2084 1665 /** … … 2092 1673 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 2093 1674 */ 2094 #ifndef IEM_WITH_SETJMP 2095 # define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2096 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \ 2097 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1)) 2098 #else 2099 # define 
IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1675 #define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2100 1676 (a_pu32Mem) = iemMemFlatMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2101 #endif2102 1677 2103 1678 /** … … 2111 1686 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW 2112 1687 */ 2113 #ifndef IEM_WITH_SETJMP 2114 # define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2115 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \ 2116 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1)) 2117 #else 2118 # define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1688 #define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2119 1689 (a_pu32Mem) = iemMemFlatMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2120 #endif2121 1690 2122 1691 /** … … 2130 1699 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2131 1700 */ 2132 #ifndef IEM_WITH_SETJMP 2133 # define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2134 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \ 2135 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1)) 2136 #else 2137 # define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1701 #define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2138 1702 (a_pu32Mem) = iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2139 #endif2140 1703 2141 1704 /** … … 2149 1712 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO 2150 1713 */ 2151 #ifndef IEM_WITH_SETJMP 2152 # define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2153 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \ 2154 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1)) 2155 #else 2156 # define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1714 #define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2157 1715 (a_pu32Mem) = iemMemFlatMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2158 #endif2159 1716 2160 1717 /** int32_t alias. */ 2161 #ifndef IEM_WITH_SETJMP 2162 # define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2163 IEM_MC_MEM_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) 2164 #else 2165 # define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1718 #define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2166 1719 (a_pi32Mem) = (int32_t *)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2167 #endif2168 1720 2169 1721 /** Flat int32_t alias. */ 2170 #ifndef IEM_WITH_SETJMP 2171 # define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2172 IEM_MC_MEM_FLAT_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) 2173 #else 2174 # define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1722 #define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2175 1723 (a_pi32Mem) = (int32_t *)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2176 #endif2177 1724 2178 1725 /** RTFLOAT32U alias. 
*/ 2179 #ifndef IEM_WITH_SETJMP 2180 # define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2181 IEM_MC_MEM_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) 2182 #else 2183 # define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1726 #define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2184 1727 (a_pr32Mem) = (PRTFLOAT32U)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2185 #endif2186 1728 2187 1729 /** Flat RTFLOAT32U alias. */ 2188 #ifndef IEM_WITH_SETJMP 2189 # define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2190 IEM_MC_MEM_FLAT_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) 2191 #else 2192 # define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1730 #define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \ 2193 1731 (a_pr32Mem) = (PRTFLOAT32U)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2194 #endif2195 1732 2196 1733 … … 2207 1744 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 2208 1745 */ 2209 #ifndef IEM_WITH_SETJMP 2210 # define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2211 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \ 2212 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1)) 2213 #else 2214 # define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1746 #define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2215 1747 (a_pu64Mem) = iemMemMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2216 #endif2217 1748 2218 1749 /** … … 2226 1757 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW 2227 1758 */ 2228 #ifndef IEM_WITH_SETJMP 2229 # define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2230 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \ 2231 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1)) 2232 #else 2233 # define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1759 #define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2234 1760 (a_pu64Mem) = iemMemMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2235 #endif2236 1761 2237 1762 /** … … 2245 1770 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2246 1771 */ 2247 #ifndef IEM_WITH_SETJMP 2248 # define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2249 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \ 2250 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2251 #else 2252 # define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1772 #define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2253 1773 (a_pu64Mem) = iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2254 #endif2255 1774 2256 1775 /** … … 2264 1783 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO 2265 1784 */ 2266 #ifndef IEM_WITH_SETJMP 2267 # define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2268 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \ 2269 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1)) 2270 #else 2271 # define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1785 #define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2272 1786 
(a_pu64Mem) = iemMemMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2273 #endif2274 1787 2275 1788 /** … … 2283 1796 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 2284 1797 */ 2285 #ifndef IEM_WITH_SETJMP 2286 # define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2287 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \ 2288 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1)) 2289 #else 2290 # define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 1798 #define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2291 1799 (a_pu64Mem) = iemMemFlatMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2292 #endif2293 1800 2294 1801 /** … … 2302 1809 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW 2303 1810 */ 2304 #ifndef IEM_WITH_SETJMP 2305 # define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2306 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \ 2307 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1)) 2308 #else 2309 # define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 1811 #define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2310 1812 (a_pu64Mem) = iemMemFlatMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2311 #endif2312 1813 2313 1814 /** … … 2321 1822 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2322 1823 */ 2323 #ifndef IEM_WITH_SETJMP 2324 # define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2325 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \ 2326 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2327 #else 2328 # define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 1824 #define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2329 1825 (a_pu64Mem) = iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2330 #endif2331 1826 2332 1827 /** … … 2340 1835 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO 2341 1836 */ 2342 #ifndef IEM_WITH_SETJMP 2343 # define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2344 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \ 2345 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1)) 2346 #else 2347 # define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 1837 #define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2348 1838 (a_pu64Mem) = iemMemFlatMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2349 #endif2350 1839 2351 1840 /** int64_t alias. */ 2352 #ifndef IEM_WITH_SETJMP 2353 # define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2354 IEM_MC_MEM_MAP_U64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) 2355 #else 2356 # define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1841 #define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2357 1842 (a_pi64Mem) = (int64_t *)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2358 #endif2359 1843 2360 1844 /** Flat int64_t alias. 
*/ 2361 #ifndef IEM_WITH_SETJMP 2362 # define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2363 IEM_MC_MEM_FLAT_MAP_U64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) 2364 #else 2365 # define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \ 1845 #define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2366 1846 (a_pi64Mem) = (int64_t *)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2367 #endif2368 1847 2369 1848 /** RTFLOAT64U alias. */ 2370 #ifndef IEM_WITH_SETJMP 2371 # define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2372 IEM_MC_MEM_MAP_U64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) 2373 #else 2374 # define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1849 #define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2375 1850 (a_pr64Mem) = (PRTFLOAT64U)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2376 #endif2377 1851 2378 1852 /** Flat RTFLOAT64U alias. */ 2379 #ifndef IEM_WITH_SETJMP 2380 # define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2381 IEM_MC_MEM_FLAT_MAP_U64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) 2382 #else 2383 # define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \ 1853 #define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2384 1854 (a_pr64Mem) = (PRTFLOAT64U)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2385 #endif2386 1855 2387 1856 … … 2398 1867 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 2399 1868 */ 2400 #ifndef IEM_WITH_SETJMP 2401 # define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2402 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \ 2403 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1)) 2404 #else 2405 # define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1869 #define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2406 1870 (a_pu128Mem) = iemMemMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2407 #endif2408 1871 2409 1872 /** … … 2417 1880 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW 2418 1881 */ 2419 #ifndef IEM_WITH_SETJMP 2420 # define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2421 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \ 2422 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1)) 2423 #else 2424 # define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1882 #define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2425 1883 (a_pu128Mem) = iemMemMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2426 #endif2427 1884 2428 1885 /** … … 2436 1893 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2437 1894 */ 2438 #ifndef IEM_WITH_SETJMP 2439 # define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2440 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), (a_iSeg), \ 2441 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1)) 2442 #else 2443 # define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1895 #define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2444 1896 (a_pu128Mem) = iemMemMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2445 #endif2446 1897 2447 1898 /** … 
… 2455 1906 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO 2456 1907 */ 2457 #ifndef IEM_WITH_SETJMP 2458 # define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2459 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), (a_iSeg), \ 2460 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1)) 2461 #else 2462 # define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1908 #define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2463 1909 (a_pu128Mem) = iemMemMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2464 #endif2465 1910 2466 1911 /** … … 2474 1919 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 2475 1920 */ 2476 #ifndef IEM_WITH_SETJMP 2477 # define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2478 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \ 2479 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128) - 1)) 2480 #else 2481 # define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 1921 #define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2482 1922 (a_pu128Mem) = iemMemFlatMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2483 #endif2484 1923 2485 1924 /** … … 2493 1932 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RW 2494 1933 */ 2495 #ifndef IEM_WITH_SETJMP 2496 # define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2497 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \ 2498 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128) - 1)) 2499 #else 2500 # define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 1934 #define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2501 1935 (a_pu128Mem) = iemMemFlatMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2502 #endif2503 1936 2504 1937 /** … … 2512 1945 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2513 1946 */ 2514 #ifndef IEM_WITH_SETJMP 2515 # define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2516 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \ 2517 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1)) 2518 #else 2519 # define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 1947 #define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2520 1948 (a_pu128Mem) = iemMemFlatMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2521 #endif2522 1949 2523 1950 /** … … 2531 1958 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_RO 2532 1959 */ 2533 #ifndef IEM_WITH_SETJMP 2534 # define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2535 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \ 2536 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1)) 2537 #else 2538 # define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 1960 #define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2539 1961 (a_pu128Mem) = iemMemFlatMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2540 #endif2541 1962 2542 1963 … … 2553 1974 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2554 1975 */ 2555 #ifndef IEM_WITH_SETJMP 2556 # define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2557 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void 
**)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \ 2558 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2559 #else 2560 # define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1976 #define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2561 1977 (a_pr80Mem) = iemMemMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2562 #endif2563 1978 2564 1979 /** … … 2571 1986 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2572 1987 */ 2573 #ifndef IEM_WITH_SETJMP 2574 # define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \ 2575 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \ 2576 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2577 #else 2578 # define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \ 1988 #define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \ 2579 1989 (a_pr80Mem) = iemMemFlatMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2580 #endif2581 1990 2582 1991 … … 2591 2000 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2592 2001 */ 2593 #ifndef IEM_WITH_SETJMP 2594 # define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2595 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \ 2596 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2597 #else 2598 # define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2002 #define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2599 2003 (a_pd80Mem) = iemMemMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2600 #endif2601 2004 2602 2005 /** … … 2609 2012 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2610 2013 */ 2611 #ifndef IEM_WITH_SETJMP 2612 # define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \ 2613 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \ 2614 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2615 #else 2616 # define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \ 2014 #define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \ 2617 2015 (a_pd80Mem) = iemMemFlatMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2618 #endif2619 2016 2620 2017 … … 2626 2023 * @note Implictly frees the a_bMapInfo variable. 2627 2024 */ 2628 #ifndef IEM_WITH_SETJMP 2629 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)) 2630 #else 2631 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo)) 2632 #endif 2025 #define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo)) 2633 2026 2634 2027 /** Commits the memory and unmaps guest memory previously mapped ATOMIC. … … 2636 2029 * @note Implictly frees the a_bMapInfo variable. 2637 2030 */ 2638 #ifndef IEM_WITH_SETJMP 2639 # define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)) 2640 #else 2641 # define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo)) 2642 #endif 2031 #define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo)) 2643 2032 2644 2033 /** Commits the memory and unmaps guest memory previously mapped W. … … 2646 2035 * @note Implictly frees the a_bMapInfo variable. 
2647 2036 */ 2648 #ifndef IEM_WITH_SETJMP 2649 # define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)) 2650 #else 2651 # define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo)) 2652 #endif 2037 #define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo)) 2653 2038 2654 2039 /** Commits the memory and unmaps guest memory previously mapped R. … … 2656 2041 * @note Implictly frees the a_bMapInfo variable. 2657 2042 */ 2658 #ifndef IEM_WITH_SETJMP 2659 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)) 2660 #else 2661 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo)) 2662 #endif 2043 #define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo)) 2663 2044 2664 2045 … … 2673 2054 * @note Implictly frees both the a_bMapInfo and a_u16FSW variables. 2674 2055 */ 2675 #ifndef IEM_WITH_SETJMP 2676 # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \ 2677 if ( !(a_u16FSW & X86_FSW_ES) \ 2678 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \ 2679 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \ 2680 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)); \ 2681 else \ 2682 iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \ 2683 } while (0) 2684 #else 2685 # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \ 2056 #define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \ 2686 2057 if ( !(a_u16FSW & X86_FSW_ES) \ 2687 2058 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \ … … 2691 2062 iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo); \ 2692 2063 } while (0) 2693 #endif2694 2064 2695 2065 /** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory. 2696 2066 * @note Implictly frees the a_bMapInfo variable. */ 2697 #ifndef IEM_WITH_SETJMP 2698 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmap(pVCpu, a_bMapInfo) 2699 #else 2700 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo) 2701 #endif 2067 #define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo) 2702 2068 2703 2069 2704 2070 2705 2071 /** Calculate efficient address from R/M. */ 2706 #ifndef IEM_WITH_SETJMP 2707 # define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \ 2708 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff))) 2709 #else 2710 # define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \ 2072 #define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \ 2711 2073 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), (a_cbImmAndRspOffset))) 2712 #endif2713 2074 2714 2075
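The hunks above retire the IEM_MC_RETURN_ON_FAILURE-based fallbacks that were guarded by #ifndef IEM_WITH_SETJMP, so the memory accessor macros now always expand to the longjmp-based iemMem...Jmp helpers. The contrast between the two error-reporting styles can be sketched in isolation; the helpers and the trivial "guest memory" check below are invented for illustration and are not the VirtualBox implementation.
{{{
#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>

/* Recovery point a failing access unwinds to (per-thread in real code). */
static jmp_buf g_JmpBuf;

/* Status-code style: every caller must check and propagate the result. */
static int fetchU8Rc(uint8_t *pbDst, uint64_t uAddr)
{
    if (uAddr >= 0x10000)
        return -1;                  /* stand-in for a VERR_* status */
    *pbDst = (uint8_t)uAddr;
    return 0;                       /* stand-in for VINF_SUCCESS */
}

/* Longjmp style: failure unwinds straight to the recovery point, so the
   happy path needs no status plumbing and can return the value directly. */
static uint8_t fetchU8Jmp(uint64_t uAddr)
{
    if (uAddr >= 0x10000)
        longjmp(g_JmpBuf, 1);
    return (uint8_t)uAddr;
}

int main(void)
{
    uint8_t bValue = 0;

    if (fetchU8Rc(&bValue, 0x12345) != 0)
        printf("status-code style: caller checked and handled the failure\n");

    if (setjmp(g_JmpBuf) == 0)
        printf("longjmp style: fetched %#x\n", fetchU8Jmp(0x12345));
    else
        printf("longjmp style: failure delivered to the recovery point\n");
    return 0;
}
}}}
The practical effect visible in the diff is that a macro body shrinks to a single expression returning the value, instead of a do/while block that has to thread a status code back to the caller.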
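Several of the fetch macros kept above differ only in how they widen the loaded value: the _ZX_ forms assign the unsigned result directly, while the _SX_ forms first cast it to the matching signed type, e.g. ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(...)). A minimal standalone illustration of those two conversions:
{{{
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t bLoaded = 0x80;                     /* 128, i.e. -128 as int8_t */

    uint32_t uZeroExtended = bLoaded;           /* _ZX_ form: 0x00000080 */
    uint32_t uSignExtended = (int8_t)bLoaded;   /* _SX_ form: 0xffffff80 */

    printf("zx=0x%08" PRIx32 " sx=0x%08" PRIx32 "\n", uZeroExtended, uSignExtended);
    return 0;
}
}}}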
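The _FLAT_ variants either call a dedicated iemMemFlat...Jmp helper or, as in IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE, pass UINT8_MAX in place of a segment register index, which appears to act as a "no segment, the address is already linear" sentinel. A hypothetical sketch of that convention; the base table and helper below are invented:
{{{
#include <stdint.h>
#include <stdio.h>

#define SEG_REG_NONE UINT8_MAX   /* sentinel: treat the address as linear */

/* Invented per-segment base addresses (indexed ES..GS) for the sketch. */
static const uint64_t g_auSegBase[6] = { 0, 0, 0x10000, 0, 0, 0 };

static uint64_t calcLinearAddr(uint8_t iSegReg, uint64_t GCPtrMem)
{
    if (iSegReg == SEG_REG_NONE)
        return GCPtrMem;                        /* flat: no base applied */
    return g_auSegBase[iSegReg] + GCPtrMem;     /* segmented: add the base */
}

int main(void)
{
    printf("via DS: %#llx\n", (unsigned long long)calcLinearAddr(2, 0x40));
    printf("flat  : %#llx\n", (unsigned long long)calcLinearAddr(SEG_REG_NONE, 0x40));
    return 0;
}
}}}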
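The mapping macros hand back a typed pointer plus an unmap-info byte, and every mapped access is closed with one of the IEM_MC_MEM_COMMIT_AND_UNMAP_* (or ROLLBACK) forms. The shape of that bracket, with simplified stand-ins rather than the real iemMemMapDataU16RwJmp machinery:
{{{
#include <stdint.h>
#include <stdio.h>

/* Pretend guest memory backing a direct mapping; a real implementation
   may also have to bounce-buffer accesses that cross pages or hit MMIO. */
static uint16_t g_au16GuestRam[32];

/* Stand-in for a read-write mapping helper: returns a writable pointer
   and records a token describing how to commit or undo the mapping. */
static uint16_t *mapDataU16Rw(uint8_t *pbUnmapInfo, unsigned idxWord)
{
    *pbUnmapInfo = 1;                 /* "direct mapping active" */
    return &g_au16GuestRam[idxWord];
}

/* Stand-in for commit-and-unmap: nothing to write back for a direct
   mapping, so it only clears the token. */
static void commitAndUnmapRw(uint8_t *pbUnmapInfo)
{
    *pbUnmapInfo = 0;
}

int main(void)
{
    uint8_t   bUnmapInfo;
    uint16_t *pu16 = mapDataU16Rw(&bUnmapInfo, 8);  /* ..._MEM_MAP_U16_RW */
    *pu16 += 1;                                     /* guest read-modify-write */
    commitAndUnmapRw(&bUnmapInfo);                  /* ..._COMMIT_AND_UNMAP_RW */

    printf("guest word 8 is now %u\n", (unsigned)g_au16GuestRam[8]);
    return 0;
}
}}}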
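IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO in the hunk above commits the mapped store unless the exception-summary bit is set together with an underflow, overflow or invalid-operation flag that is unmasked in the control word; otherwise it rolls the mapping back. The condition can be exercised on its own (the bit values follow the standard x87 FSW/FCW layout):
{{{
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standard x87 status/control word bits used by the commit decision. */
#define FSW_IE        (1u << 0)   /* invalid operation raised */
#define FSW_OE        (1u << 3)   /* overflow raised */
#define FSW_UE        (1u << 4)   /* underflow raised */
#define FSW_ES        (1u << 7)   /* exception summary */
#define FCW_MASK_ALL  0x003fu     /* IM..PM exception mask bits */

/* Commit unless the summary bit is set and one of UE/OE/IE is both
   raised in the status word and unmasked in the control word. */
static bool shouldCommitFpuStore(uint16_t u16Fsw, uint16_t u16Fcw)
{
    return !(u16Fsw & FSW_ES)
        || !((u16Fsw & (FSW_UE | FSW_OE | FSW_IE)) & ~(u16Fcw & FCW_MASK_ALL));
}

int main(void)
{
    /* Underflow raised but masked: the store still reaches guest memory. */
    printf("masked underflow -> commit=%d\n",
           shouldCommitFpuStore(FSW_ES | FSW_UE, FCW_MASK_ALL));
    /* Invalid operation raised and unmasked: roll the mapping back. */
    printf("unmasked invalid -> commit=%d\n",
           shouldCommitFpuStore(FSW_ES | FSW_IE, 0x0000));
    return 0;
}
}}}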