Changeset 102977 in vbox
- Timestamp: Jan 19, 2024 11:11:30 PM
- Location: trunk
- Files: 19 edited
trunk/include/VBox/err.h
r102733 → r102977:

     /** Trick for resuming EMHistoryExec after a VMCPU_FF_IOM is handled. */
     #define VINF_EM_RESUME_R3_HISTORY_EXEC      1161
-    /** Emulate split-lock access on SMP. */
+    /** Emulate split-lock access on SMP.
+     * This is also used for dealing with locked alignment conflicts with the host
+     * in general from IEM. */
     #define VINF_EM_EMULATE_SPLIT_LOCK          1162
     /** @} */
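The widened comment matters because this status is no longer SMP-specific: IEM now also returns it whenever a locked access conflicts with host alignment rules. A toy, standalone model of the status flow (only the VINF_EM_EMULATE_SPLIT_LOCK name and value come from the hunk above; every other name is invented for the illustration):

    #include <stdio.h>

    /* Toy model of the status flow this change widens: a mapping helper
       returns the informational status when a locked access violates host
       alignment rules, and the outer loop reacts by switching to safe
       emulation.  All names besides VINF_EM_EMULATE_SPLIT_LOCK are made up. */
    #define VINF_SUCCESS                0
    #define VINF_EM_EMULATE_SPLIT_LOCK  1162    /* value from err.h above */

    static int mapGuestMemSketch(unsigned long addr, unsigned cb)
    {
        if (64u - (addr & 63u) < cb)            /* crosses a 64-byte cache line */
            return VINF_EM_EMULATE_SPLIT_LOCK;  /* caller must emulate safely */
        return VINF_SUCCESS;
    }

    int main(void)
    {
        int rc = mapGuestMemSketch(0x203fUL, 4); /* 4 bytes at line offset 63 */
        if (rc == VINF_EM_EMULATE_SPLIT_LOCK)
            printf("fall back to safe emulation (rc=%d)\n", rc);
        return 0;
    }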
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r102876 → r102977:

      * @param   GCPtrMem    The address of the guest memory.
-     * @param   fAccess     How the memory is being accessed.  The
-     *                      IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
-     *                      the memory, while the IEM_ACCESS_WHAT_XXX bit is used
-     *                      when raising exceptions.
+     * @param   fAccess     How the memory is being accessed.  The
+     *                      IEM_ACCESS_TYPE_XXX part is used to figure out how to
+     *                      map the memory, while the IEM_ACCESS_WHAT_XXX part is
+     *                      used when raising exceptions.  The IEM_ACCESS_ATOMIC and
+     *                      IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
+     *                      set.
      * @param   uAlignCtl   Alignment control:
      *                          - Bits 15:0 is the alignment mask.
…
     Assert(   cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
            || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
-    Assert( ~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
+    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
     Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
…
             return iemRaiseGeneralProtectionFault0(pVCpu);
         }
+
+#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
+        /* If the access is atomic there are host platform alignment restrictions
+           we need to conform with. */
+        if (   !(fAccess & IEM_ACCESS_ATOMIC)
+# if defined(RT_ARCH_AMD64)
+            || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
+# elif defined(RT_ARCH_ARM64)
+            || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
+# else
+#  error port me
+# endif
+           )
+        { /* okay */ }
+        else
+        {
+            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
+            pVCpu->iem.s.cMisalignedAtomics += 1;
+            return VINF_EM_EMULATE_SPLIT_LOCK;
+        }
+#endif
     }
…
      *                      is required (for IDT, GDT and LDT accesses).
      * @param   GCPtrMem    The address of the guest memory.
-     * @param   fAccess     How the memory is being accessed.  The
-     *                      IEM_ACCESS_TYPE_XXX bit is used to figure out
-     *                      how to map the memory, while the
-     *                      IEM_ACCESS_WHAT_XXX bit is used when raising
-     *                      exceptions.
+     * @param   fAccess     How the memory is being accessed.  The
+     *                      IEM_ACCESS_TYPE_XXX part is used to figure out how to
+     *                      map the memory, while the IEM_ACCESS_WHAT_XXX part is
+     *                      used when raising exceptions.  The IEM_ACCESS_ATOMIC and
+     *                      IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
+     *                      set.
      * @param   uAlignCtl   Alignment control:
      *                          - Bits 15:0 is the alignment mask.
…
      */
     Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
-    Assert( ~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
+    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
     Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
…
             iemRaiseGeneralProtectionFault0Jmp(pVCpu);
         }
+
+#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
+        /* If the access is atomic there are host platform alignment restrictions
+           we need to conform with. */
+        if (   !(fAccess & IEM_ACCESS_ATOMIC)
+# if defined(RT_ARCH_AMD64)
+            || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
+# elif defined(RT_ARCH_ARM64)
+            || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
+# else
+#  error port me
+# endif
+           )
+        { /* okay */ }
+        else
+        {
+            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
+            pVCpu->iem.s.cMisalignedAtomics += 1;
+            IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
+        }
+#endif
     }
…
 /** Fallback for iemMemCommitAndUnmapRwJmp. */
 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
+{
+    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
+    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
+}
+
+
+/** Fallback for iemMemCommitAndUnmapAtJmp. */
+void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
 {
     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
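The guard added to iemMemMap/iemMemMapJmp relies on `N - (addr & (N-1))` being the number of bytes left in the naturally aligned N-byte block containing `addr`; the access needs the split-lock fallback only when it spills past that boundary. A standalone check of the arithmetic, assuming the same 64-byte x86 cache lines and 16-byte ARM LSE2 blocks as the hunk:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Returns true when an access of cbMem bytes at GCPtrMem stays inside one
       naturally aligned cbBlock-sized block (cbBlock must be a power of two). */
    static bool fitsInAlignedBlock(uint64_t GCPtrMem, uint32_t cbMem, uint32_t cbBlock)
    {
        return cbBlock - (GCPtrMem & (cbBlock - 1)) >= cbMem;
    }

    int main(void)
    {
        /* 4-byte access at line offset 62 crosses into the next 64-byte line:
           a split lock on x86, so IEM returns VINF_EM_EMULATE_SPLIT_LOCK. */
        printf("0x103e len 4, 64-byte line:  %d\n", fitsInAlignedBlock(0x103e, 4, 64)); /* 0 */
        /* The same access at offset 60 fits and takes the fast path. */
        printf("0x103c len 4, 64-byte line:  %d\n", fitsInAlignedBlock(0x103c, 4, 64)); /* 1 */
        /* ARM LSE2 permits unaligned atomics only within a 16-byte block. */
        printf("0x100e len 4, 16-byte block: %d\n", fitsInAlignedBlock(0x100e, 4, 16)); /* 0 */
        return 0;
    }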
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl-arm64.S
r102549 → r102977:

         ret

+/* Some sketches.
+
+// IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_locked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
+        .p2align 2
+        .private_extern NAME(iemAImpl_xchg_u8_locked)
+        .globl          NAME(iemAImpl_xchg_u8_locked)
+NAME(iemAImpl_xchg_u8_locked):
+        ldrb    w2, [x1]
+        swpalb  w2, w2, [x0]
+        strb    w2, [x1]
+        ret
+
+// IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_locked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
+        .p2align 2
+        .private_extern NAME(iemAImpl_xchg_u16_locked)
+        .globl          NAME(iemAImpl_xchg_u16_locked)
+NAME(iemAImpl_xchg_u16_locked):
+        ldrh    w2, [x1]
+        swpalh  w2, w2, [x0]
+        strh    w2, [x1]
+        ret
+
+// IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_locked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
+// IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_locked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
+
+*/
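The sketched `swpalb`/`swpalh` are ARMv8.1 LSE atomic swaps with acquire-release ordering. Before committing to assembly, the intended semantics can be expressed portably with the GCC/Clang atomic builtins, which LSE-enabled compilers lower to the same instructions; a hedged C equivalent of the u8 sketch (the helper name is invented):

    #include <stdint.h>

    /* Same contract as the sketched iemAImpl_xchg_u8_locked: atomically swap
       the register value into *pu8Mem and hand the old memory value back via
       pu8Reg.  With -march=armv8.1-a this compiles to a single SWPALB;
       SEQ_CST is at least as strong as SWPALB's acquire+release ordering and
       matches the full-barrier behaviour of a locked x86 XCHG. */
    static void xchgU8LockedSketch(uint8_t *pu8Mem, uint8_t *pu8Reg)
    {
        *pu8Reg = __atomic_exchange_n(pu8Mem, *pu8Reg, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        uint8_t mem = 0x11, reg = 0x22;
        xchgU8LockedSketch(&mem, &reg);
        /* now mem == 0x22 and reg == 0x11 */
        return mem == 0x22 && reg == 0x11 ? 0 : 1;
    }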
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
r102876 r102977 60 60 * Body for instructions like ADD, AND, OR, TEST, CMP, ++ with a byte 61 61 * memory/register as the destination. 62 * 63 * Used with IEMOP_BODY_BINARY_rm_r8_NO_LOCK or IEMOP_BODY_BINARY_rm_r8_LOCKED. 64 */ 65 #define IEMOP_BODY_BINARY_rm_r8_RW(a_fnNormalU8) \ 62 */ 63 #define IEMOP_BODY_BINARY_rm_r8_RW(a_fnNormalU8, a_fnLockedU8) \ 66 64 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \ 67 65 \ … … 115 113 else \ 116 114 { \ 117 (void)0 115 IEM_MC_BEGIN(3, 3, 0, 0); \ 116 IEM_MC_ARG(uint8_t *, pu8Dst, 0); \ 117 IEM_MC_ARG(uint8_t, u8Src, 1); \ 118 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); \ 119 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ 120 IEM_MC_LOCAL(uint8_t, bMapInfoDst); \ 121 \ 122 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 123 IEMOP_HLP_DONE_DECODING(); \ 124 IEM_MC_MEM_MAP_U8_ATOMIC(pu8Dst, bMapInfoDst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 125 IEM_MC_FETCH_GREG_U8(u8Src, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 126 IEM_MC_FETCH_EFLAGS(EFlags); \ 127 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU8, pu8Dst, u8Src, pEFlags); \ 128 \ 129 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bMapInfoDst); \ 130 IEM_MC_COMMIT_EFLAGS(EFlags); \ 131 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 132 IEM_MC_END(); \ 133 } \ 134 } \ 135 (void)0 118 136 119 137 /** 120 138 * Body for instructions like TEST & CMP, ++ with a byte memory/registers as 121 139 * operands. 122 *123 * Used with IEMOP_BODY_BINARY_rm_r8_NO_LOCK or IEMOP_BODY_BINARY_rm_r8_LOCKED.124 140 */ 125 141 #define IEMOP_BODY_BINARY_rm_r8_RO(a_fnNormalU8) \ … … 152 168 * after the memory. \ 153 169 */ \ 154 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) || (pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) \170 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) \ 155 171 { \ 156 172 IEM_MC_BEGIN(3, 3, 0, 0); \ … … 175 191 else \ 176 192 { \ 177 (void)0178 179 #define IEMOP_BODY_BINARY_rm_r8_NO_LOCK() \180 193 IEMOP_HLP_DONE_DECODING(); \ 181 194 IEMOP_RAISE_INVALID_LOCK_PREFIX_RET(); \ 182 } \183 } \184 (void)0185 186 #define IEMOP_BODY_BINARY_rm_r8_LOCKED(a_fnLockedU8) \187 IEM_MC_BEGIN(3, 3, 0, 0); \188 IEM_MC_ARG(uint8_t *, pu8Dst, 0); \189 IEM_MC_ARG(uint8_t, u8Src, 1); \190 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); \191 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \192 IEM_MC_LOCAL(uint8_t, bMapInfoDst); \193 \194 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \195 IEMOP_HLP_DONE_DECODING(); \196 IEM_MC_MEM_MAP_U8_RW(pu8Dst, bMapInfoDst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \197 IEM_MC_FETCH_GREG_U8(u8Src, IEM_GET_MODRM_REG(pVCpu, bRm)); \198 IEM_MC_FETCH_EFLAGS(EFlags); \199 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU8, pu8Dst, u8Src, pEFlags); \200 \201 IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bMapInfoDst); \202 IEM_MC_COMMIT_EFLAGS(EFlags); \203 IEM_MC_ADVANCE_RIP_AND_FINISH(); \204 IEM_MC_END(); \205 195 } \ 206 196 } \ … … 417 407 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 418 408 IEMOP_HLP_DONE_DECODING(); \ 419 IEM_MC_MEM_MAP_U16_ RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \409 IEM_MC_MEM_MAP_U16_ATOMIC(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 420 410 IEM_MC_FETCH_GREG_U16(u16Src, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 421 411 IEM_MC_FETCH_EFLAGS(EFlags); \ 422 412 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU16, pu16Dst, u16Src, pEFlags); \ 423 413 \ 424 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \414 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 425 415 IEM_MC_COMMIT_EFLAGS(EFlags); \ 426 416 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 438 428 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 439 429 IEMOP_HLP_DONE_DECODING(); \ 440 
IEM_MC_MEM_MAP_U32_ RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \430 IEM_MC_MEM_MAP_U32_ATOMIC(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 441 431 IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 442 432 IEM_MC_FETCH_EFLAGS(EFlags); \ 443 433 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU32, pu32Dst, u32Src, pEFlags); \ 444 434 \ 445 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo /* CMP,TEST */); \435 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo /* CMP,TEST */); \ 446 436 IEM_MC_COMMIT_EFLAGS(EFlags); \ 447 437 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 459 449 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 460 450 IEMOP_HLP_DONE_DECODING(); \ 461 IEM_MC_MEM_MAP_U64_ RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \451 IEM_MC_MEM_MAP_U64_ATOMIC(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 462 452 IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 463 453 IEM_MC_FETCH_EFLAGS(EFlags); \ 464 454 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU64, pu64Dst, u64Src, pEFlags); \ 465 455 \ 466 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \456 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 467 457 IEM_MC_COMMIT_EFLAGS(EFlags); \ 468 458 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 737 727 { 738 728 IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES | IEMOPHINT_LOCK_ALLOWED); 739 IEMOP_BODY_BINARY_rm_r8_RW( iemAImpl_add_u8); 740 IEMOP_BODY_BINARY_rm_r8_LOCKED(iemAImpl_add_u8_locked); 729 IEMOP_BODY_BINARY_rm_r8_RW(iemAImpl_add_u8, iemAImpl_add_u8_locked); 741 730 } 742 731 … … 860 849 IEMOP_MNEMONIC2(MR, OR, or, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES | IEMOPHINT_LOCK_ALLOWED); 861 850 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 862 IEMOP_BODY_BINARY_rm_r8_RW( iemAImpl_or_u8); 863 IEMOP_BODY_BINARY_rm_r8_LOCKED(iemAImpl_or_u8_locked); 851 IEMOP_BODY_BINARY_rm_r8_RW(iemAImpl_or_u8, iemAImpl_or_u8_locked); 864 852 } 865 853 … … 1029 1017 { 1030 1018 IEMOP_MNEMONIC2(MR, ADC, adc, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES | IEMOPHINT_LOCK_ALLOWED); 1031 IEMOP_BODY_BINARY_rm_r8_RW( iemAImpl_adc_u8); 1032 IEMOP_BODY_BINARY_rm_r8_LOCKED(iemAImpl_adc_u8_locked); 1019 IEMOP_BODY_BINARY_rm_r8_RW(iemAImpl_adc_u8, iemAImpl_adc_u8_locked); 1033 1020 } 1034 1021 … … 1150 1137 { 1151 1138 IEMOP_MNEMONIC2(MR, SBB, sbb, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES | IEMOPHINT_LOCK_ALLOWED); 1152 IEMOP_BODY_BINARY_rm_r8_RW( iemAImpl_sbb_u8); 1153 IEMOP_BODY_BINARY_rm_r8_LOCKED(iemAImpl_sbb_u8_locked); 1139 IEMOP_BODY_BINARY_rm_r8_RW(iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked); 1154 1140 } 1155 1141 … … 1263 1249 IEMOP_MNEMONIC2(MR, AND, and, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES | IEMOPHINT_LOCK_ALLOWED); 1264 1250 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 1265 IEMOP_BODY_BINARY_rm_r8_RW( iemAImpl_and_u8); 1266 IEMOP_BODY_BINARY_rm_r8_LOCKED(iemAImpl_and_u8_locked); 1251 IEMOP_BODY_BINARY_rm_r8_RW(iemAImpl_and_u8, iemAImpl_and_u8_locked); 1267 1252 } 1268 1253 … … 1388 1373 { 1389 1374 IEMOP_MNEMONIC2(MR, SUB, sub, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES | IEMOPHINT_LOCK_ALLOWED); 1390 IEMOP_BODY_BINARY_rm_r8_RW( iemAImpl_sub_u8); 1391 IEMOP_BODY_BINARY_rm_r8_LOCKED(iemAImpl_sub_u8_locked); 1375 IEMOP_BODY_BINARY_rm_r8_RW(iemAImpl_sub_u8, iemAImpl_sub_u8_locked); 1392 1376 } 1393 1377 … … 1501 1485 IEMOP_MNEMONIC2(MR, XOR, xor, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES | IEMOPHINT_LOCK_ALLOWED); 1502 1486 
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 1503 IEMOP_BODY_BINARY_rm_r8_RW( iemAImpl_xor_u8); 1504 IEMOP_BODY_BINARY_rm_r8_LOCKED(iemAImpl_xor_u8_locked); 1487 IEMOP_BODY_BINARY_rm_r8_RW(iemAImpl_xor_u8, iemAImpl_xor_u8_locked); 1505 1488 } 1506 1489 … … 1658 1641 IEMOP_MNEMONIC(cmp_Eb_Gb, "cmp Eb,Gb"); 1659 1642 IEMOP_BODY_BINARY_rm_r8_RO(iemAImpl_cmp_u8); 1660 IEMOP_BODY_BINARY_rm_r8_NO_LOCK();1661 1643 } 1662 1644 … … 3957 3939 IEMOP_HLP_DONE_DECODING(); \ 3958 3940 \ 3959 IEM_MC_MEM_MAP_U8_ RW(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \3941 IEM_MC_MEM_MAP_U8_ATOMIC(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 3960 3942 IEM_MC_FETCH_EFLAGS(EFlags); \ 3961 3943 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU8, pu8Dst, u8Src, pEFlags); \ 3962 3944 \ 3963 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \3945 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 3964 3946 IEM_MC_COMMIT_EFLAGS(EFlags); \ 3965 3947 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 4313 4295 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4314 4296 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 4315 IEM_MC_MEM_MAP_U16_ RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \4297 IEM_MC_MEM_MAP_U16_ATOMIC(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4316 4298 \ 4317 4299 IEM_MC_ARG_CONST(uint16_t, u16Src, u16Imm, 1); \ … … 4320 4302 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU16, pu16Dst, u16Src, pEFlags); \ 4321 4303 \ 4322 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \4304 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 4323 4305 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4324 4306 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 4338 4320 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4339 4321 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 4340 IEM_MC_MEM_MAP_U32_ RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \4322 IEM_MC_MEM_MAP_U32_ATOMIC(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4341 4323 \ 4342 4324 IEM_MC_ARG_CONST(uint32_t, u32Src, u32Imm, 1); \ … … 4345 4327 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU32, pu32Dst, u32Src, pEFlags); \ 4346 4328 \ 4347 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \4329 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 4348 4330 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4349 4331 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 4363 4345 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4364 4346 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 4365 IEM_MC_MEM_MAP_U64_ RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \4347 IEM_MC_MEM_MAP_U64_ATOMIC(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4366 4348 \ 4367 4349 IEM_MC_ARG_CONST(uint64_t, u64Src, u64Imm, 1); \ … … 4370 4352 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU64, pu64Dst, u64Src, pEFlags); \ 4371 4353 \ 4372 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \4354 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 4373 4355 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4374 4356 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 4829 4811 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4830 4812 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 4831 IEM_MC_MEM_MAP_U16_ RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \4813 IEM_MC_MEM_MAP_U16_ATOMIC(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4832 4814 \ 4833 4815 IEM_MC_ARG_CONST(uint16_t, u16Src, (int16_t)(int8_t)u8Imm, 1); \ … … 4836 4818 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU16, pu16Dst, u16Src, pEFlags); \ 4837 4819 \ 4838 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \4820 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 4839 4821 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4840 4822 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 4852 4834 
IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4853 4835 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 4854 IEM_MC_MEM_MAP_U32_ RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \4836 IEM_MC_MEM_MAP_U32_ATOMIC(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4855 4837 \ 4856 4838 IEM_MC_ARG_CONST(uint32_t, u32Src, (int32_t)(int8_t)u8Imm, 1); \ … … 4859 4841 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU32, pu32Dst, u32Src, pEFlags); \ 4860 4842 \ 4861 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \4843 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 4862 4844 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4863 4845 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 4875 4857 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4876 4858 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 4877 IEM_MC_MEM_MAP_U64_ RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \4859 IEM_MC_MEM_MAP_U64_ATOMIC(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4878 4860 \ 4879 4861 IEM_MC_ARG_CONST(uint64_t, u64Src, (int64_t)(int8_t)u8Imm, 1); \ … … 4882 4864 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU64, pu64Dst, u64Src, pEFlags); \ 4883 4865 \ 4884 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \4866 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 4885 4867 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4886 4868 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 5168 5150 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 5169 5151 IEMOP_BODY_BINARY_rm_r8_RO(iemAImpl_test_u8); 5170 IEMOP_BODY_BINARY_rm_r8_NO_LOCK();5171 5152 } 5172 5153 … … 5214 5195 * We're accessing memory. 5215 5196 */ 5216 #define IEMOP_XCHG_BYTE(a_fnWorker ) \5197 #define IEMOP_XCHG_BYTE(a_fnWorker, a_Style) \ 5217 5198 IEM_MC_BEGIN(2, 4, 0, 0); \ 5218 5199 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ … … 5224 5205 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 5225 5206 IEMOP_HLP_DONE_DECODING(); /** @todo testcase: lock xchg */ \ 5226 IEM_MC_MEM_MAP_U8_ RW(pu8Mem, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \5207 IEM_MC_MEM_MAP_U8_##a_Style(pu8Mem, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 5227 5208 IEM_MC_FETCH_GREG_U8(uTmpReg, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 5228 5209 IEM_MC_CALL_VOID_AIMPL_2(a_fnWorker, pu8Mem, pu8Reg); \ 5229 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \5210 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Style(bUnmapInfo); \ 5230 5211 IEM_MC_STORE_GREG_U8(IEM_GET_MODRM_REG(pVCpu, bRm), uTmpReg); \ 5231 5212 \ … … 5235 5216 if (!(pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) 5236 5217 { 5237 IEMOP_XCHG_BYTE(iemAImpl_xchg_u8_locked );5218 IEMOP_XCHG_BYTE(iemAImpl_xchg_u8_locked,ATOMIC); 5238 5219 } 5239 5220 else 5240 5221 { 5241 IEMOP_XCHG_BYTE(iemAImpl_xchg_u8_unlocked );5222 IEMOP_XCHG_BYTE(iemAImpl_xchg_u8_unlocked,RW); 5242 5223 } 5243 5224 } … … 5313 5294 * We're accessing memory. 
5314 5295 */ 5315 #define IEMOP_XCHG_EV_GV(a_fnWorker16, a_fnWorker32, a_fnWorker64 ) \5296 #define IEMOP_XCHG_EV_GV(a_fnWorker16, a_fnWorker32, a_fnWorker64, a_Type) \ 5316 5297 do { \ 5317 5298 switch (pVCpu->iem.s.enmEffOpSize) \ … … 5327 5308 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 5328 5309 IEMOP_HLP_DONE_DECODING(); /** @todo testcase: lock xchg */ \ 5329 IEM_MC_MEM_MAP_U16_ RW(pu16Mem, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \5310 IEM_MC_MEM_MAP_U16_##a_Type(pu16Mem, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 5330 5311 IEM_MC_FETCH_GREG_U16(uTmpReg, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 5331 5312 IEM_MC_CALL_VOID_AIMPL_2(a_fnWorker16, pu16Mem, pu16Reg); \ 5332 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \5313 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 5333 5314 IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, bRm), uTmpReg); \ 5334 5315 \ … … 5347 5328 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 5348 5329 IEMOP_HLP_DONE_DECODING(); \ 5349 IEM_MC_MEM_MAP_U32_ RW(pu32Mem, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \5330 IEM_MC_MEM_MAP_U32_##a_Type(pu32Mem, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 5350 5331 IEM_MC_FETCH_GREG_U32(uTmpReg, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 5351 5332 IEM_MC_CALL_VOID_AIMPL_2(a_fnWorker32, pu32Mem, pu32Reg); \ 5352 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \5333 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 5353 5334 IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, bRm), uTmpReg); \ 5354 5335 \ … … 5367 5348 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 5368 5349 IEMOP_HLP_DONE_DECODING(); \ 5369 IEM_MC_MEM_MAP_U64_ RW(pu64Mem, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \5350 IEM_MC_MEM_MAP_U64_##a_Type(pu64Mem, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 5370 5351 IEM_MC_FETCH_GREG_U64(uTmpReg, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 5371 5352 IEM_MC_CALL_VOID_AIMPL_2(a_fnWorker64, pu64Mem, pu64Reg); \ 5372 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \5353 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 5373 5354 IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, bRm), uTmpReg); \ 5374 5355 \ … … 5382 5363 if (!(pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) 5383 5364 { 5384 IEMOP_XCHG_EV_GV(iemAImpl_xchg_u16_locked, iemAImpl_xchg_u32_locked, iemAImpl_xchg_u64_locked );5365 IEMOP_XCHG_EV_GV(iemAImpl_xchg_u16_locked, iemAImpl_xchg_u32_locked, iemAImpl_xchg_u64_locked,ATOMIC); 5385 5366 } 5386 5367 else 5387 5368 { 5388 IEMOP_XCHG_EV_GV(iemAImpl_xchg_u16_unlocked, iemAImpl_xchg_u32_unlocked, iemAImpl_xchg_u64_unlocked );5369 IEMOP_XCHG_EV_GV(iemAImpl_xchg_u16_unlocked, iemAImpl_xchg_u32_unlocked, iemAImpl_xchg_u64_unlocked,RW); 5389 5370 } 5390 5371 } … … 13021 13002 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, a_bRm, 0); \ 13022 13003 IEMOP_HLP_DONE_DECODING(); \ 13023 IEM_MC_MEM_MAP_U8_ RW(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \13004 IEM_MC_MEM_MAP_U8_ATOMIC(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 13024 13005 IEM_MC_FETCH_EFLAGS(EFlags); \ 13025 13006 IEM_MC_CALL_VOID_AIMPL_2(a_fnLockedU8, pu8Dst, pEFlags); \ 13026 13007 \ 13027 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \13008 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 13028 13009 IEM_MC_COMMIT_EFLAGS(EFlags); \ 13029 13010 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 13170 13151 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 13171 13152 IEMOP_HLP_DONE_DECODING(); \ 13172 IEM_MC_MEM_MAP_U16_ RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \13153 IEM_MC_MEM_MAP_U16_ATOMIC(pu16Dst, bUnmapInfo, 
pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 13173 13154 IEM_MC_FETCH_EFLAGS(EFlags); \ 13174 13155 IEM_MC_CALL_VOID_AIMPL_2(a_fnLockedU16, pu16Dst, pEFlags); \ 13175 13156 \ 13176 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \13157 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 13177 13158 IEM_MC_COMMIT_EFLAGS(EFlags); \ 13178 13159 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 13189 13170 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 13190 13171 IEMOP_HLP_DONE_DECODING(); \ 13191 IEM_MC_MEM_MAP_U32_ RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \13172 IEM_MC_MEM_MAP_U32_ATOMIC(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 13192 13173 IEM_MC_FETCH_EFLAGS(EFlags); \ 13193 13174 IEM_MC_CALL_VOID_AIMPL_2(a_fnLockedU32, pu32Dst, pEFlags); \ 13194 13175 \ 13195 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \13176 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 13196 13177 IEM_MC_COMMIT_EFLAGS(EFlags); \ 13197 13178 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 13208 13189 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 13209 13190 IEMOP_HLP_DONE_DECODING(); \ 13210 IEM_MC_MEM_MAP_U64_ RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \13191 IEM_MC_MEM_MAP_U64_ATOMIC(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 13211 13192 IEM_MC_FETCH_EFLAGS(EFlags); \ 13212 13193 IEM_MC_CALL_VOID_AIMPL_2(a_fnLockedU64, pu64Dst, pEFlags); \ 13213 13194 \ 13214 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \13195 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 13215 13196 IEM_MC_COMMIT_EFLAGS(EFlags); \ 13216 13197 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ -
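The recurring pattern in this file is the new `a_Type`/`a_Style` macro parameter that gets token-pasted into the mapping helpers (`IEM_MC_MEM_MAP_U16_##a_Type`, `IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type`), so one instruction body expands to either the plain RW path or the new ATOMIC path. A toy standalone reduction of that dispatch (all names here are stand-ins, not IEM's):

    #include <stdio.h>

    /* Toy model of the a_Type token pasting used above: one body macro
       expands to either the plain or the atomic mapping helper depending on
       the suffix supplied at the call site. */
    #define MAP_U16_RW(p)      printf("map %p read-write\n", (void *)(p))
    #define MAP_U16_ATOMIC(p)  printf("map %p atomic\n", (void *)(p))

    #define BODY_XCHG_U16(a_Type, p)  MAP_U16_##a_Type(p)

    int main(void)
    {
        unsigned short u16 = 0;
        BODY_XCHG_U16(RW, &u16);      /* expands to MAP_U16_RW(&u16) */
        BODY_XCHG_U16(ATOMIC, &u16);  /* expands to MAP_U16_ATOMIC(&u16) */
        return 0;
    }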
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
r102876 r102977 3026 3026 'IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT': (McBlock.parseMcGeneric, True, True, False, ), 3027 3027 'IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE': (McBlock.parseMcGeneric, True, True, False, ), 3028 'IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC': (McBlock.parseMcGeneric, True, True, True, ), 3028 3029 'IEM_MC_MEM_COMMIT_AND_UNMAP_RW': (McBlock.parseMcGeneric, True, True, True, ), 3029 3030 'IEM_MC_MEM_COMMIT_AND_UNMAP_RO': (McBlock.parseMcGeneric, True, True, True, ), … … 3037 3038 'IEM_MC_MEM_MAP_R64_WO': (McBlock.parseMcGeneric, True, True, True, ), 3038 3039 'IEM_MC_MEM_MAP_R80_WO': (McBlock.parseMcGeneric, True, True, True, ), 3040 'IEM_MC_MEM_MAP_U8_ATOMIC': (McBlock.parseMcGeneric, True, True, True, ), 3039 3041 'IEM_MC_MEM_MAP_U8_RW': (McBlock.parseMcGeneric, True, True, True, ), 3040 3042 'IEM_MC_MEM_MAP_U8_RO': (McBlock.parseMcGeneric, True, True, True, ), 3041 3043 'IEM_MC_MEM_MAP_U8_WO': (McBlock.parseMcGeneric, True, True, True, ), 3044 'IEM_MC_MEM_MAP_U16_ATOMIC': (McBlock.parseMcGeneric, True, True, True, ), 3042 3045 'IEM_MC_MEM_MAP_U16_RW': (McBlock.parseMcGeneric, True, True, True, ), 3043 3046 'IEM_MC_MEM_MAP_U16_RO': (McBlock.parseMcGeneric, True, True, True, ), 3044 3047 'IEM_MC_MEM_MAP_U16_WO': (McBlock.parseMcGeneric, True, True, True, ), 3048 'IEM_MC_MEM_MAP_U32_ATOMIC': (McBlock.parseMcGeneric, True, True, True, ), 3045 3049 'IEM_MC_MEM_MAP_U32_RW': (McBlock.parseMcGeneric, True, True, True, ), 3046 3050 'IEM_MC_MEM_MAP_U32_RO': (McBlock.parseMcGeneric, True, True, True, ), 3047 3051 'IEM_MC_MEM_MAP_U32_WO': (McBlock.parseMcGeneric, True, True, True, ), 3052 'IEM_MC_MEM_MAP_U64_ATOMIC': (McBlock.parseMcGeneric, True, True, True, ), 3048 3053 'IEM_MC_MEM_MAP_U64_RW': (McBlock.parseMcGeneric, True, True, True, ), 3049 3054 'IEM_MC_MEM_MAP_U64_RO': (McBlock.parseMcGeneric, True, True, True, ), 3050 3055 'IEM_MC_MEM_MAP_U64_WO': (McBlock.parseMcGeneric, True, True, True, ), 3056 'IEM_MC_MEM_MAP_U128_ATOMIC': (McBlock.parseMcGeneric, True, True, True, ), 3051 3057 'IEM_MC_MEM_MAP_U128_RW': (McBlock.parseMcGeneric, True, True, True, ), 3052 3058 'IEM_MC_MEM_MAP_U128_RO': (McBlock.parseMcGeneric, True, True, True, ), … … 3488 3494 self.cTotalMcBlocks = 0; 3489 3495 3490 self.oReMacroName = re.compile( '^[A-Za-z_][A-Za-z0-9_]*$');3491 self.oReMnemonic = re.compile( '^[A-Za-z_][A-Za-z0-9_]*$');3492 self.oReStatsName = re.compile( '^[A-Za-z_][A-Za-z0-9_]*$');3493 self.oReFunctionName= re.compile( '^iemOp_[A-Za-z_][A-Za-z0-9_]*$');3494 self.oReGroupName = re.compile( '^og_[a-z0-9]+(|_[a-z0-9]+|_[a-z0-9]+_[a-z0-9]+)$');3495 self.oReDisEnum = re.compile( '^OP_[A-Z0-9_]+$');3496 self.oReFunTable = re.compile( '^(IEM_STATIC|static) +const +PFNIEMOP +g_apfn[A-Za-z0-9_]+ *\[ *\d* *\] *= *$');3497 self.oReComment = re.compile( '//.*?$|/\*.*?\*/'); ## Full comments.3498 self.oReHashDefine2 = re.compile( '(?s)\A\s*([A-Za-z_][A-Za-z0-9_]*)\(([^)]*)\)\s*(.*)\Z'); ##< With arguments.3499 self.oReHashDefine3 = re.compile( '(?s)\A\s*([A-Za-z_][A-Za-z0-9_]*)[^(]\s*(.*)\Z'); ##< Simple, no arguments.3496 self.oReMacroName = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$'); 3497 self.oReMnemonic = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$'); 3498 self.oReStatsName = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$'); 3499 self.oReFunctionName= re.compile(r'^iemOp_[A-Za-z_][A-Za-z0-9_]*$'); 3500 self.oReGroupName = re.compile(r'^og_[a-z0-9]+(|_[a-z0-9]+|_[a-z0-9]+_[a-z0-9]+)$'); 3501 self.oReDisEnum = re.compile(r'^OP_[A-Z0-9_]+$'); 3502 self.oReFunTable = re.compile(r'^(IEM_STATIC|static) +const 
+PFNIEMOP +g_apfn[A-Za-z0-9_]+ *\[ *\d* *\] *= *$'); 3503 self.oReComment = re.compile(r'//.*?$|/\*.*?\*/'); ## Full comments. 3504 self.oReHashDefine2 = re.compile(r'(?s)\A\s*([A-Za-z_][A-Za-z0-9_]*)\(([^)]*)\)\s*(.*)\Z'); ##< With arguments. 3505 self.oReHashDefine3 = re.compile(r'(?s)\A\s*([A-Za-z_][A-Za-z0-9_]*)[^(]\s*(.*)\Z'); ##< Simple, no arguments. 3500 3506 self.oReMcBeginEnd = re.compile(r'\bIEM_MC_(BEGIN|END|DEFER_TO_CIMPL_[1-5]_RET)\s*\('); ##> Not DEFER_TO_CIMPL_0_RET! 3501 3507 self.fDebug = True; … … 3619 3625 # Extract the table name. 3620 3626 # 3621 sName = re.search( ' *([a-zA-Z_0-9]+) *\[', sLine).group(1);3627 sName = re.search(r' *([a-zA-Z_0-9]+) *\[', sLine).group(1); 3622 3628 oMap = g_dInstructionMapsByIemName.get(sName); 3623 3629 if not oMap: … … 3635 3641 asPrefixes = ('none', '0x66', '0xf3', '0xf2'); 3636 3642 3637 oEntriesMatch = re.search( '\[ *(256|32) *\]', sLine);3643 oEntriesMatch = re.search(r'\[ *(256|32) *\]', sLine); 3638 3644 if oEntriesMatch: 3639 3645 cEntriesPerByte = 1; … … 3954 3960 def parseTagOpBrief(self, sTag, aasSections, iTagLine, iEndLine): 3955 3961 """ 3956 Tag: \@opbrief3962 Tag: @opbrief 3957 3963 Value: Text description, multiple sections, appended. 3958 3964 … … 3984 3990 def parseTagOpDesc(self, sTag, aasSections, iTagLine, iEndLine): 3985 3991 """ 3986 Tag: \@opdesc3992 Tag: @opdesc 3987 3993 Value: Text description, multiple sections, appended. 3988 3994 … … 4022 4028 def parseTagOpOperandN(self, sTag, aasSections, iTagLine, iEndLine): 4023 4029 """ 4024 Tags: \@op1, \@op2, \@op3, \@op44030 Tags: @op1, @op2, @op3, @op4 4025 4031 Value: [where:]type 4026 4032 … … 4072 4078 def parseTagOpMaps(self, sTag, aasSections, iTagLine, iEndLine): 4073 4079 """ 4074 Tag: \@opmaps4080 Tag: @opmaps 4075 4081 Value: map[,map2] 4076 4082 … … 4107 4113 def parseTagOpPfx(self, sTag, aasSections, iTagLine, iEndLine): 4108 4114 """ 4109 Tag: \@oppfx4115 Tag: @oppfx 4110 4116 Value: n/a|none|0x66|0xf3|0xf2 4111 4117 … … 4145 4151 def parseTagOpcode(self, sTag, aasSections, iTagLine, iEndLine): 4146 4152 """ 4147 Tag: \@opcode4153 Tag: @opcode 4148 4154 Value: 0x?? | /reg (TODO: | mr/reg | 11 /reg | !11 /reg | 11 mr/reg | !11 mr/reg) 4149 4155 … … 4175 4181 def parseTagOpcodeSub(self, sTag, aasSections, iTagLine, iEndLine): 4176 4182 """ 4177 Tag: \@opcodesub4183 Tag: @opcodesub 4178 4184 Value: none | 11 mr/reg | !11 mr/reg | rex.w=0 | rex.w=1 | vex.l=0 | vex.l=1 4179 4185 | 11 mr/reg vex.l=0 | 11 mr/reg vex.l=1 | !11 mr/reg vex.l=0 | !11 mr/reg vex.l=1 … … 4202 4208 def parseTagOpEnc(self, sTag, aasSections, iTagLine, iEndLine): 4203 4209 """ 4204 Tag: \@openc4210 Tag: @openc 4205 4211 Value: ModR/M|fixed|prefix|<map name> 4206 4212 … … 4238 4244 def parseTagOpEFlags(self, sTag, aasSections, iTagLine, iEndLine): 4239 4245 """ 4240 Tags: \@opfltest, \@opflmodify, \@opflundef, \@opflset, \@opflclear4246 Tags: @opfltest, @opflmodify, @opflundef, @opflset, @opflclear 4241 4247 Value: <eflags specifier> 4242 4248 … … 4270 4276 def parseTagOpHints(self, sTag, aasSections, iTagLine, iEndLine): 4271 4277 """ 4272 Tag: \@ophints4278 Tag: @ophints 4273 4279 Value: Comma or space separated list of flags and hints. 
4274 4280 … … 4304 4310 def parseTagOpDisEnum(self, sTag, aasSections, iTagLine, iEndLine): 4305 4311 """ 4306 Tag: \@opdisenum4312 Tag: @opdisenum 4307 4313 Value: OP_XXXX 4308 4314 … … 4333 4339 def parseTagOpMinCpu(self, sTag, aasSections, iTagLine, iEndLine): 4334 4340 """ 4335 Tag: \@opmincpu4341 Tag: @opmincpu 4336 4342 Value: <simple CPU name> 4337 4343 … … 4363 4369 def parseTagOpCpuId(self, sTag, aasSections, iTagLine, iEndLine): 4364 4370 """ 4365 Tag: \@opcpuid4371 Tag: @opcpuid 4366 4372 Value: none | <CPUID flag specifier> 4367 4373 … … 4397 4403 def parseTagOpGroup(self, sTag, aasSections, iTagLine, iEndLine): 4398 4404 """ 4399 Tag: \@opgroup4405 Tag: @opgroup 4400 4406 Value: op_grp1[_subgrp2[_subsubgrp3]] 4401 4407 … … 4423 4429 def parseTagOpUnusedInvalid(self, sTag, aasSections, iTagLine, iEndLine): 4424 4430 """ 4425 Tag: \@opunused, \@opinvalid, \@opinvlstyle4431 Tag: @opunused, @opinvalid, @opinvlstyle 4426 4432 Value: <invalid opcode behaviour style> 4427 4433 4428 The \@opunused indicates the specification is for a currently unused4434 The @opunused indicates the specification is for a currently unused 4429 4435 instruction encoding. 4430 4436 4431 The \@opinvalid indicates the specification is for an invalid currently4437 The @opinvalid indicates the specification is for an invalid currently 4432 4438 instruction encoding (like UD2). 4433 4439 4434 The \@opinvlstyle just indicates how CPUs decode the instruction when4435 not supported ( \@opcpuid, \@opmincpu) or disabled.4440 The @opinvlstyle just indicates how CPUs decode the instruction when 4441 not supported (@opcpuid, @opmincpu) or disabled. 4436 4442 """ 4437 4443 oInstr = self.ensureInstructionForOpTag(iTagLine); … … 4461 4467 def parseTagOpTest(self, sTag, aasSections, iTagLine, iEndLine): # pylint: disable=too-many-locals 4462 4468 """ 4463 Tag: \@optest4469 Tag: @optest 4464 4470 Value: [<selectors>[ ]?] <inputs> -> <outputs> 4465 4471 Example: mode==64bit / in1=0xfffffffe:dw in2=1:dw -> out1=0xffffffff:dw outfl=a?,p? … … 4606 4612 def parseTagOpTestNum(self, sTag, aasSections, iTagLine, iEndLine): 4607 4613 """ 4608 Numbered \@optest tag. Either \@optest42 or \@optest[42].4614 Numbered @optest tag. Either @optest42 or @optest[42]. 4609 4615 """ 4610 4616 oInstr = self.ensureInstructionForOpTag(iTagLine); … … 4622 4628 def parseTagOpTestIgnore(self, sTag, aasSections, iTagLine, iEndLine): 4623 4629 """ 4624 Tag: \@optestign | \@optestignore4630 Tag: @optestign | @optestignore 4625 4631 Value: <value is ignored> 4626 4632 4627 4633 This is a simple trick to ignore a test while debugging another. 4628 4634 4629 See also \@oponlytest.4635 See also @oponlytest. 4630 4636 """ 4631 4637 _ = sTag; _ = aasSections; _ = iTagLine; _ = iEndLine; … … 4634 4640 def parseTagOpCopyTests(self, sTag, aasSections, iTagLine, iEndLine): 4635 4641 """ 4636 Tag: \@opcopytests4642 Tag: @opcopytests 4637 4643 Value: <opstat | function> [..] 4638 Example: \@opcopytests add_Eb_Gb4644 Example: @opcopytests add_Eb_Gb 4639 4645 4640 4646 Trick to avoid duplicating tests for different encodings of the same … … 4663 4669 def parseTagOpOnlyTest(self, sTag, aasSections, iTagLine, iEndLine): 4664 4670 """ 4665 Tag: \@oponlytest | \@oponly4671 Tag: @oponlytest | @oponly 4666 4672 Value: none 4667 4673 … … 4669 4675 for singling out one or two new instructions or tests. 4670 4676 4671 See also \@optestignore.4677 See also @optestignore. 
4672 4678 """ 4673 4679 oInstr = self.ensureInstructionForOpTag(iTagLine); … … 4686 4692 def parseTagOpXcptType(self, sTag, aasSections, iTagLine, iEndLine): 4687 4693 """ 4688 Tag: \@opxcpttype4694 Tag: @opxcpttype 4689 4695 Value: [none|1|2|3|4|4UA|5|6|7|8|11|12|E1|E1NF|E2|E3|E3NF|E4|E4NF|E5|E5NF|E6|E6NF|E7NF|E9|E9NF|E10|E11|E12|E12NF] 4690 4696 … … 4713 4719 def parseTagOpFunction(self, sTag, aasSections, iTagLine, iEndLine): 4714 4720 """ 4715 Tag: \@opfunction4721 Tag: @opfunction 4716 4722 Value: <VMM function name> 4717 4723 … … 4741 4747 def parseTagOpStats(self, sTag, aasSections, iTagLine, iEndLine): 4742 4748 """ 4743 Tag: \@opstats4749 Tag: @opstats 4744 4750 Value: <VMM statistics base name> 4745 4751 … … 4769 4775 def parseTagOpDone(self, sTag, aasSections, iTagLine, iEndLine): 4770 4776 """ 4771 Tag: \@opdone4777 Tag: @opdone 4772 4778 Value: none 4773 4779 … … 5487 5493 for sName, oMacro in self.dMacros.items(): 5488 5494 if sRegex: 5489 sRegex += '|' + sName;5495 sRegex += r'|' + sName; 5490 5496 else: 5491 sRegex = '\\b(' + sName;5497 sRegex = r'\b(' + sName; 5492 5498 if oMacro.asArgs is not None: 5493 sRegex += '\s*\(';5499 sRegex += r'\s*\('; 5494 5500 else: 5495 sRegex += '\\b';5501 sRegex += r'\b'; 5496 5502 sRegex += ')'; 5497 5503 self.oReMacros = re.compile(sRegex); -
trunk/src/VBox/VMM/VMMAll/IEMAllInstTwoByte0f.cpp.h
r102891 r102977 9157 9157 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 9158 9158 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 9159 IEM_MC_MEM_MAP_U16_ RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \9159 IEM_MC_MEM_MAP_U16_ATOMIC(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 9160 9160 \ 9161 9161 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ … … 9163 9163 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU16, pu16Dst, u16Src, pEFlags); \ 9164 9164 \ 9165 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \9165 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 9166 9166 IEM_MC_COMMIT_EFLAGS(EFlags); \ 9167 9167 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 9185 9185 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 9186 9186 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 9187 IEM_MC_MEM_MAP_U32_ RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \9187 IEM_MC_MEM_MAP_U32_ATOMIC(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 9188 9188 \ 9189 9189 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ … … 9191 9191 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU32, pu32Dst, u32Src, pEFlags); \ 9192 9192 \ 9193 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \9193 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 9194 9194 IEM_MC_COMMIT_EFLAGS(EFlags); \ 9195 9195 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 9213 9213 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 9214 9214 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 9215 IEM_MC_MEM_MAP_U64_ RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \9215 IEM_MC_MEM_MAP_U64_ATOMIC(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 9216 9216 \ 9217 9217 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ … … 9219 9219 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU64, pu64Dst, u64Src, pEFlags); \ 9220 9220 \ 9221 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \9221 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 9222 9222 IEM_MC_COMMIT_EFLAGS(EFlags); \ 9223 9223 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 9299 9299 /* memory destination. */ \ 9300 9300 /** @todo test negative bit offsets! 
*/ \ 9301 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) || (pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) \9301 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) \ 9302 9302 { \ 9303 9303 switch (pVCpu->iem.s.enmEffOpSize) \ … … 10274 10274 else 10275 10275 { 10276 #define IEMOP_BODY_CMPXCHG_BYTE(a_fnWorker ) \10276 #define IEMOP_BODY_CMPXCHG_BYTE(a_fnWorker, a_Type) \ 10277 10277 IEM_MC_BEGIN(4, 4, IEM_MC_F_MIN_486, 0); \ 10278 10278 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ … … 10282 10282 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 10283 10283 IEM_MC_ARG(uint8_t *, pu8Dst, 0); \ 10284 IEM_MC_MEM_MAP_U8_ RW(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \10284 IEM_MC_MEM_MAP_U8_##a_Type(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 10285 10285 \ 10286 10286 IEM_MC_ARG(uint8_t, u8Src, 2); \ … … 10295 10295 IEM_MC_CALL_VOID_AIMPL_4(a_fnWorker, pu8Dst, pu8Al, u8Src, pEFlags); \ 10296 10296 \ 10297 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \10297 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 10298 10298 IEM_MC_COMMIT_EFLAGS(EFlags); \ 10299 10299 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Al); \ … … 10303 10303 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) || (pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) 10304 10304 { 10305 IEMOP_BODY_CMPXCHG_BYTE(iemAImpl_cmpxchg_u8 );10305 IEMOP_BODY_CMPXCHG_BYTE(iemAImpl_cmpxchg_u8,RW); 10306 10306 } 10307 10307 else 10308 10308 { 10309 IEMOP_BODY_CMPXCHG_BYTE(iemAImpl_cmpxchg_u8_locked );10309 IEMOP_BODY_CMPXCHG_BYTE(iemAImpl_cmpxchg_u8_locked,ATOMIC); 10310 10310 } 10311 10311 } … … 10388 10388 else 10389 10389 { 10390 #define IEMOP_BODY_CMPXCHG_EV_GV(a_fnWorker16, a_fnWorker32, a_fnWorker64 ) \10390 #define IEMOP_BODY_CMPXCHG_EV_GV(a_fnWorker16, a_fnWorker32, a_fnWorker64,a_Type) \ 10391 10391 do { \ 10392 10392 switch (pVCpu->iem.s.enmEffOpSize) \ … … 10401 10401 \ 10402 10402 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 10403 IEM_MC_MEM_MAP_U16_ RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \10403 IEM_MC_MEM_MAP_U16_##a_Type(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 10404 10404 \ 10405 10405 IEM_MC_ARG(uint16_t, u16Src, 2); \ … … 10414 10414 IEM_MC_CALL_VOID_AIMPL_4(a_fnWorker16, pu16Dst, pu16Ax, u16Src, pEFlags); \ 10415 10415 \ 10416 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \10416 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 10417 10417 IEM_MC_COMMIT_EFLAGS(EFlags); \ 10418 10418 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Ax); \ … … 10429 10429 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 10430 10430 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 10431 IEM_MC_MEM_MAP_U32_ RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \10431 IEM_MC_MEM_MAP_U32_##a_Type(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 10432 10432 \ 10433 10433 IEM_MC_ARG(uint32_t, u32Src, 2); \ … … 10442 10442 IEM_MC_CALL_VOID_AIMPL_4(a_fnWorker32, pu32Dst, pu32Eax, u32Src, pEFlags); \ 10443 10443 \ 10444 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \10444 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 10445 10445 IEM_MC_COMMIT_EFLAGS(EFlags); \ 10446 10446 \ … … 10461 10461 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 10462 10462 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 10463 IEM_MC_MEM_MAP_U64_ RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \10463 IEM_MC_MEM_MAP_U64_##a_Type(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 10464 10464 \ 10465 10465 IEM_MC_ARG(uint64_t, u64Src, 2); \ … … 10475 10475 IEM_MC_CALL_VOID_AIMPL_4(a_fnWorker64, pu64Dst, pu64Rax, u64Src, pEFlags); \ 10476 10476 \ 10477 
IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \10477 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 10478 10478 IEM_MC_COMMIT_EFLAGS(EFlags); \ 10479 10479 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Rax); \ … … 10488 10488 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) || (pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) 10489 10489 { 10490 IEMOP_BODY_CMPXCHG_EV_GV(iemAImpl_cmpxchg_u16, iemAImpl_cmpxchg_u32, iemAImpl_cmpxchg_u64 );10490 IEMOP_BODY_CMPXCHG_EV_GV(iemAImpl_cmpxchg_u16, iemAImpl_cmpxchg_u32, iemAImpl_cmpxchg_u64,RW); 10491 10491 } 10492 10492 else 10493 10493 { 10494 IEMOP_BODY_CMPXCHG_EV_GV(iemAImpl_cmpxchg_u16_locked, iemAImpl_cmpxchg_u32_locked, iemAImpl_cmpxchg_u64_locked );10494 IEMOP_BODY_CMPXCHG_EV_GV(iemAImpl_cmpxchg_u16_locked, iemAImpl_cmpxchg_u32_locked, iemAImpl_cmpxchg_u64_locked,ATOMIC); 10495 10495 } 10496 10496 } … … 10914 10914 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 10915 10915 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 10916 IEM_MC_MEM_MAP_U16_ RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \10916 IEM_MC_MEM_MAP_U16_ATOMIC(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 10917 10917 \ 10918 10918 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ bImm & 0x0f, 1); \ … … 10921 10921 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU16, pu16Dst, u16Src, pEFlags); \ 10922 10922 \ 10923 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \10923 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 10924 10924 IEM_MC_COMMIT_EFLAGS(EFlags); \ 10925 10925 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 10937 10937 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 10938 10938 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 10939 IEM_MC_MEM_MAP_U32_ RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \10939 IEM_MC_MEM_MAP_U32_ATOMIC(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 10940 10940 \ 10941 10941 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ bImm & 0x1f, 1); \ … … 10944 10944 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU32, pu32Dst, u32Src, pEFlags); \ 10945 10945 \ 10946 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \10946 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 10947 10947 IEM_MC_COMMIT_EFLAGS(EFlags); \ 10948 10948 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 10960 10960 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 10961 10961 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 10962 IEM_MC_MEM_MAP_U64_ RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \10962 IEM_MC_MEM_MAP_U64_ATOMIC(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 10963 10963 \ 10964 10964 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ bImm & 0x3f, 1); \ … … 10967 10967 IEM_MC_CALL_VOID_AIMPL_3(a_fnLockedU64, pu64Dst, u64Src, pEFlags); \ 10968 10968 \ 10969 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \10969 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ 10970 10970 IEM_MC_COMMIT_EFLAGS(EFlags); \ 10971 10971 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ … … 11042 11042 /* memory destination. */ \ 11043 11043 /** @todo test negative bit offsets! */ \ 11044 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) || (pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) \11044 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) \ 11045 11045 { \ 11046 11046 switch (pVCpu->iem.s.enmEffOpSize) \ … … 11609 11609 * We're accessing memory. 
11610 11610 */ 11611 #define IEMOP_BODY_XADD_BYTE(a_fnWorker ) \11611 #define IEMOP_BODY_XADD_BYTE(a_fnWorker, a_Type) \ 11612 11612 IEM_MC_BEGIN(3, 4, IEM_MC_F_MIN_486, 0); \ 11613 11613 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ … … 11617 11617 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 11618 11618 IEM_MC_ARG(uint8_t *, pu8Dst, 0); \ 11619 IEM_MC_MEM_MAP_U8_ RW(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \11619 IEM_MC_MEM_MAP_U8_##a_Type(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 11620 11620 \ 11621 11621 IEM_MC_LOCAL(uint8_t, u8RegCopy); \ … … 11627 11627 IEM_MC_CALL_VOID_AIMPL_3(a_fnWorker, pu8Dst, pu8Reg, pEFlags); \ 11628 11628 \ 11629 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \11629 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 11630 11630 IEM_MC_COMMIT_EFLAGS(EFlags); \ 11631 11631 IEM_MC_STORE_GREG_U8(IEM_GET_MODRM_REG(pVCpu, bRm), u8RegCopy); \ … … 11634 11634 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) || (pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) 11635 11635 { 11636 IEMOP_BODY_XADD_BYTE(iemAImpl_xadd_u8 );11636 IEMOP_BODY_XADD_BYTE(iemAImpl_xadd_u8,RW); 11637 11637 } 11638 11638 else 11639 11639 { 11640 IEMOP_BODY_XADD_BYTE(iemAImpl_xadd_u8_locked );11640 IEMOP_BODY_XADD_BYTE(iemAImpl_xadd_u8_locked,ATOMIC); 11641 11641 } 11642 11642 } … … 11716 11716 * We're accessing memory. 11717 11717 */ 11718 #define IEMOP_BODY_XADD_EV_GV(a_fnWorker16, a_fnWorker32, a_fnWorker64 ) \11718 #define IEMOP_BODY_XADD_EV_GV(a_fnWorker16, a_fnWorker32, a_fnWorker64, a_Type) \ 11719 11719 do { \ 11720 11720 switch (pVCpu->iem.s.enmEffOpSize) \ … … 11728 11728 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 11729 11729 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 11730 IEM_MC_MEM_MAP_U16_ RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \11730 IEM_MC_MEM_MAP_U16_##a_Type(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 11731 11731 \ 11732 11732 IEM_MC_LOCAL(uint16_t, u16RegCopy); \ … … 11738 11738 IEM_MC_CALL_VOID_AIMPL_3(a_fnWorker16, pu16Dst, pu16Reg, pEFlags); \ 11739 11739 \ 11740 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \11740 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 11741 11741 IEM_MC_COMMIT_EFLAGS(EFlags); \ 11742 11742 IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_REG(pVCpu, bRm), u16RegCopy); \ … … 11753 11753 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 11754 11754 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 11755 IEM_MC_MEM_MAP_U32_ RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \11755 IEM_MC_MEM_MAP_U32_##a_Type(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 11756 11756 \ 11757 11757 IEM_MC_LOCAL(uint32_t, u32RegCopy); \ … … 11763 11763 IEM_MC_CALL_VOID_AIMPL_3(a_fnWorker32, pu32Dst, pu32Reg, pEFlags); \ 11764 11764 \ 11765 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \11765 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 11766 11766 IEM_MC_COMMIT_EFLAGS(EFlags); \ 11767 11767 IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_REG(pVCpu, bRm), u32RegCopy); \ … … 11778 11778 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 11779 11779 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 11780 IEM_MC_MEM_MAP_U64_ RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \11780 IEM_MC_MEM_MAP_U64_##a_Type(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 11781 11781 \ 11782 11782 IEM_MC_LOCAL(uint64_t, u64RegCopy); \ … … 11788 11788 IEM_MC_CALL_VOID_AIMPL_3(a_fnWorker64, pu64Dst, pu64Reg, pEFlags); \ 11789 11789 \ 11790 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \11790 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 11791 11791 IEM_MC_COMMIT_EFLAGS(EFlags); 
\ 11792 11792 IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_REG(pVCpu, bRm), u64RegCopy); \ … … 11801 11801 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) || (pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) 11802 11802 { 11803 IEMOP_BODY_XADD_EV_GV(iemAImpl_xadd_u16, iemAImpl_xadd_u32, iemAImpl_xadd_u64 );11803 IEMOP_BODY_XADD_EV_GV(iemAImpl_xadd_u16, iemAImpl_xadd_u32, iemAImpl_xadd_u64,RW); 11804 11804 } 11805 11805 else 11806 11806 { 11807 IEMOP_BODY_XADD_EV_GV(iemAImpl_xadd_u16_locked, iemAImpl_xadd_u32_locked, iemAImpl_xadd_u64_locked );11807 IEMOP_BODY_XADD_EV_GV(iemAImpl_xadd_u16_locked, iemAImpl_xadd_u32_locked, iemAImpl_xadd_u64_locked,ATOMIC); 11808 11808 } 11809 11809 } … … 12433 12433 { 12434 12434 IEMOP_MNEMONIC(cmpxchg8b, "cmpxchg8b Mq"); 12435 #define IEMOP_BODY_CMPXCHG8B(a_fnWorker ) \12435 #define IEMOP_BODY_CMPXCHG8B(a_fnWorker, a_Type) \ 12436 12436 IEM_MC_BEGIN(4, 5, IEM_MC_F_NOT_286_OR_OLDER, 0); \ 12437 12437 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ … … 12441 12441 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 12442 12442 IEM_MC_ARG(uint64_t *, pu64MemDst, 0); \ 12443 IEM_MC_MEM_MAP_U64_ RW(pu64MemDst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \12443 IEM_MC_MEM_MAP_U64_##a_Type(pu64MemDst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 12444 12444 \ 12445 12445 IEM_MC_LOCAL(RTUINT64U, u64EaxEdx); \ … … 12455 12455 IEM_MC_CALL_VOID_AIMPL_4(a_fnWorker, pu64MemDst, pu64EaxEdx, pu64EbxEcx, pEFlags); \ 12456 12456 \ 12457 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \12457 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 12458 12458 IEM_MC_COMMIT_EFLAGS(EFlags); \ 12459 12459 IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF) { \ … … 12465 12465 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) || (pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) 12466 12466 { 12467 IEMOP_BODY_CMPXCHG8B(iemAImpl_cmpxchg8b );12468 } 12469 else 12470 { 12471 IEMOP_BODY_CMPXCHG8B(iemAImpl_cmpxchg8b_locked );12467 IEMOP_BODY_CMPXCHG8B(iemAImpl_cmpxchg8b,RW); 12468 } 12469 else 12470 { 12471 IEMOP_BODY_CMPXCHG8B(iemAImpl_cmpxchg8b_locked,ATOMIC); 12472 12472 } 12473 12473 } … … 12485 12485 * the patterns IEMAllThrdPython.py requires for the code morphing. 
12486 12486 */ 12487 #define BODY_CMPXCHG16B_HEAD(bUnmapInfoStmt ) \12487 #define BODY_CMPXCHG16B_HEAD(bUnmapInfoStmt, a_Type) \ 12488 12488 IEM_MC_BEGIN(5, 4, IEM_MC_F_64BIT, 0); \ 12489 12489 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ … … 12494 12494 bUnmapInfoStmt; \ 12495 12495 IEM_MC_ARG(PRTUINT128U, pu128MemDst, 0); \ 12496 IEM_MC_MEM_MAP_U128_ RW(pu128MemDst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \12496 IEM_MC_MEM_MAP_U128_##a_Type(pu128MemDst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 12497 12497 \ 12498 12498 IEM_MC_LOCAL(RTUINT128U, u128RaxRdx); \ … … 12507 12507 IEM_MC_FETCH_EFLAGS(EFlags) 12508 12508 12509 #define BODY_CMPXCHG16B_TAIL \12510 IEM_MC_MEM_COMMIT_AND_UNMAP_ RW(bUnmapInfo); \12509 #define BODY_CMPXCHG16B_TAIL(a_Type) \ 12510 IEM_MC_MEM_COMMIT_AND_UNMAP_##a_Type(bUnmapInfo); \ 12511 12511 IEM_MC_COMMIT_EFLAGS(EFlags); \ 12512 12512 IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF) { \ … … 12521 12521 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) || (pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) 12522 12522 { 12523 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo) );12524 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags );12525 BODY_CMPXCHG16B_TAIL ;12523 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo),RW); 12524 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags,RW); 12525 BODY_CMPXCHG16B_TAIL(RW); 12526 12526 } 12527 12527 else 12528 12528 { 12529 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo) );12530 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags );12531 BODY_CMPXCHG16B_TAIL ;12529 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo),ATOMIC); 12530 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags,ATOMIC); 12531 BODY_CMPXCHG16B_TAIL(ATOMIC); 12532 12532 } 12533 12533 } … … 12536 12536 if (pVCpu->CTX_SUFF(pVM)->cCpus == 1) 12537 12537 { 12538 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo) );12539 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags );12540 BODY_CMPXCHG16B_TAIL ;12538 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo),RW); 12539 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags,RW); 12540 BODY_CMPXCHG16B_TAIL(RW); 12541 12541 } 12542 12542 else 12543 12543 { 12544 BODY_CMPXCHG16B_HEAD(IEM_MC_ARG(uint8_t, bUnmapInfo, 4) );12544 BODY_CMPXCHG16B_HEAD(IEM_MC_ARG(uint8_t, bUnmapInfo, 4),RW); 12545 12545 IEM_MC_CALL_CIMPL_5(IEM_CIMPL_F_STATUS_FLAGS, 12546 12546 RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xAX) … … 12556 12556 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) || (pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK)) 12557 12557 { 12558 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo) );12558 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo),RW); 12559 12559 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12560 BODY_CMPXCHG16B_TAIL ;12560 BODY_CMPXCHG16B_TAIL(RW); 12561 12561 } 12562 12562 else 12563 12563 { 12564 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo) );12564 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo),ATOMIC); 12565 12565 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12566 BODY_CMPXCHG16B_TAIL ;12566 BODY_CMPXCHG16B_TAIL(ATOMIC); 12567 12567 } 12568 12568 … … 12574 12574 if 
(pVCpu->CTX_SUFF(pVM)->cCpus == 1) 12575 12575 { 12576 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo) );12576 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo),RW); 12577 12577 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12578 BODY_CMPXCHG16B_TAIL ;12578 BODY_CMPXCHG16B_TAIL(RW); 12579 12579 } 12580 12580 else 12581 12581 { 12582 BODY_CMPXCHG16B_HEAD(IEM_MC_ARG(uint8_t, bUnmapInfo, 4) );12582 BODY_CMPXCHG16B_HEAD(IEM_MC_ARG(uint8_t, bUnmapInfo, 4),RW); 12583 12583 IEM_MC_CALL_CIMPL_4(IEM_CIMPL_F_STATUS_FLAGS, 12584 12584 RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xAX) -
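These hunks route the locked forms of CMPXCHG, XADD, CMPXCHG8B and CMPXCHG16B through the new ATOMIC mapping while the unlocked forms keep RW. For reference, the architectural contract such a locked worker must honour can be modelled with a strong compare-exchange builtin (illustrative only, not IEM's implementation):

    #include <stdbool.h>
    #include <stdint.h>

    /* Behavioral sketch of a locked 8-bit CMPXCHG worker: compare *pu8Dst
       against the accumulator; on match store u8Src, otherwise load the
       current memory value into the accumulator.  On failure the builtin
       writes the observed value to *pu8Al, which is exactly the AL update
       the architecture requires; the boolean result stands in for ZF. */
    static bool cmpxchgU8LockedSketch(uint8_t *pu8Dst, uint8_t *pu8Al, uint8_t u8Src)
    {
        return __atomic_compare_exchange_n(pu8Dst, pu8Al, u8Src, false /*fWeak*/,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        uint8_t mem = 5, al = 5;
        bool fZf = cmpxchgU8LockedSketch(&mem, &al, 9);  /* match: mem=9, ZF=1 */
        return fZf && mem == 9 ? 0 : 1;
    }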
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h
r102790 → r102977:

 
 #ifdef IEM_WITH_SETJMP
+
+/**
+ * Maps a data buffer for atomic read+write direct access (or via a bounce
+ * buffer), longjmp on error.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
+ * @param   pbUnmapInfo Pointer to unmap info variable.
+ * @param   iSegReg     The index of the segment register to use for
+ *                      this access.  The base and limits are checked.
+ * @param   GCPtrMem    The address of the guest memory.
+ */
+TMPL_MEM_TYPE *
+RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
+                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
+{
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
+    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
+# endif
+    Log8(("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
+    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
+    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
+                                         IEM_ACCESS_DATA_ATOMIC, TMPL_MEM_TYPE_ALIGN);
+}
+
 
 /**
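IEMAllMemRWTmpl.cpp.h is a textual template: it is #included once per access width with TMPL_MEM_TYPE and TMPL_MEM_FN_SUFF defined, and RT_CONCAT3 pastes the suffix into the name, so the single AtSafeJmp body above materialises as iemMemMapDataU8AtSafeJmp, iemMemMapDataU16AtSafeJmp, and so on. A toy standalone reduction of the mechanism (macro and function names invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    #define MY_CONCAT3(a,b,c)      MY_CONCAT3_HLP(a,b,c) /* extra hop so args expand */
    #define MY_CONCAT3_HLP(a,b,c)  a##b##c

    /* "Template" body; in the real tree this lives in its own header and is
       included repeatedly with different TYPE/SUFF settings. */
    #define TMPL_INSTANTIATE(TYPE, SUFF) \
        static TYPE MY_CONCAT3(demoLoad,SUFF,AtSketch)(TYPE const *p) { return *p; }

    TMPL_INSTANTIATE(uint16_t, U16)   /* defines demoLoadU16AtSketch */
    TMPL_INSTANTIATE(uint32_t, U32)   /* defines demoLoadU32AtSketch */

    int main(void)
    {
        uint16_t a = 0x1234; uint32_t b = 0x89abcdefu;
        printf("%x %x\n", (unsigned)demoLoadU16AtSketch(&a),
               (unsigned)demoLoadU32AtSketch(&b));
        return 0;
    }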
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h
r102790 r102977 382 382 /** 383 383 * Inlined read-write memory mapping function that longjumps on error. 384 * 385 * Almost identical to RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtJmp). 384 386 */ 385 387 DECL_INLINE_THROW(TMPL_MEM_TYPE *) … … 438 440 /** 439 441 * Inlined flat read-write memory mapping function that longjumps on error. 442 * 443 * Almost identical to RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,AtJmp). 440 444 */ 441 445 DECL_INLINE_THROW(TMPL_MEM_TYPE *) … … 490 494 } 491 495 496 # ifdef TMPL_MEM_WITH_ATOMIC_MAPPING 497 498 /** 499 * Inlined atomic read-write memory mapping function that longjumps on error. 500 * 501 * Almost identical to RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp). 502 */ 503 DECL_INLINE_THROW(TMPL_MEM_TYPE *) 504 RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, 505 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP 506 { 507 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE) 508 /* 509 * Convert from segmented to flat address and check that it doesn't cross a page boundrary. 510 */ 511 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem); 512 # if TMPL_MEM_TYPE_SIZE > 1 513 if (RT_LIKELY(!(GCPtrEff & TMPL_MEM_TYPE_ALIGN))) /* strictly aligned, otherwise do fall back which knows th details. */ 514 # endif 515 { 516 /* 517 * TLB lookup. 518 */ 519 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff); 520 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag); 521 if (RT_LIKELY(pTlbe->uTag == uTag)) 522 { 523 /* 524 * Check TLB page table level access flags. 525 */ 526 AssertCompile(IEMTLBE_F_PT_NO_USER == 4); 527 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER; 528 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 529 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ 530 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE 531 | fNoUser)) 532 == pVCpu->iem.s.DataTlb.uTlbPhysRev)) 533 { 534 /* 535 * Return the address. 536 */ 537 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;}); 538 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */ 539 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK)); 540 *pbUnmapInfo = 0; 541 Log7Ex(LOG_GROUP_IEM_MEM,("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n", 542 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK])); 543 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]; 544 } 545 } 546 } 547 548 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception 549 outdated page pointer, or other troubles. (This will do a TLB load.) */ 550 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem)); 551 # endif 552 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 553 } 554 555 556 /** 557 * Inlined flat read-write memory mapping function that longjumps on error. 558 * 559 * Almost identical to RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp). 
560 */ 561 DECL_INLINE_THROW(TMPL_MEM_TYPE *) 562 RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,AtJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, 563 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP 564 { 565 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE) 566 /* 567 * Check that the address doesn't cross a page boundary. 568 */ 569 # if TMPL_MEM_TYPE_SIZE > 1 570 if (RT_LIKELY(!(GCPtrMem & TMPL_MEM_TYPE_ALIGN))) /* strictly aligned, otherwise fall back to the safe code which knows the details. */ 571 # endif 572 { 573 /* 574 * TLB lookup. 575 */ 576 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem); 577 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag); 578 if (RT_LIKELY(pTlbe->uTag == uTag)) 579 { 580 /* 581 * Check TLB page table level access flags. 582 */ 583 AssertCompile(IEMTLBE_F_PT_NO_USER == 4); 584 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER; 585 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 586 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ 587 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE 588 | fNoUser)) 589 == pVCpu->iem.s.DataTlb.uTlbPhysRev)) 590 { 591 /* 592 * Return the address. 593 */ 594 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;}); 595 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */ 596 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK)); 597 *pbUnmapInfo = 0; 598 Log7Ex(LOG_GROUP_IEM_MEM,("IEM AT/map " TMPL_MEM_FMT_DESC " %RGv: %p\n", 599 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK])); 600 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]; 601 } 602 } 603 } 604 605 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception, 606 outdated page pointer, or other troubles. (This will do a TLB load.) */ 607 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem)); 608 # endif 609 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem); 610 } 611 612 # endif /* TMPL_MEM_WITH_ATOMIC_MAPPING */ 492 613 493 614 /** -
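The TMPL_MEM_TYPE_ALIGN test above is what keeps the inline path safe for atomics: a naturally aligned access of power-of-two size can never straddle a page or any larger power-of-two boundary the host's atomics care about, so only the AtSafeJmp worker ever has to deal with misalignment (and with the VINF_EM_EMULATE_SPLIT_LOCK fallback, see the IEMInternal.h hunk below). A worked example for the uint32_t instantiation, where the mask is 3 (illustrative only):

    /* Mirrors the fast-path test above for the uint32_t instance (mask == 3): */
    Assert(!(UINT64_C(0x1000) & 3)); /* aligned dword: stays on the inline TLB path */
    Assert( (UINT64_C(0x1002) & 3)); /* misaligned dword: taken to the AtSafeJmp fallback */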
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
r102876 r102977 169 169 'IEM_MC_MEM_FLAT_MAP_R64_WO': (None, True, True, True, ), 170 170 'IEM_MC_MEM_FLAT_MAP_R80_WO': (None, True, True, True, ), 171 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC': (None, True, True, True, ), 171 172 'IEM_MC_MEM_FLAT_MAP_U8_RO': (None, True, True, True, ), 172 173 'IEM_MC_MEM_FLAT_MAP_U8_RW': (None, True, True, True, ), 174 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC': (None, True, True, True, ), 173 175 'IEM_MC_MEM_FLAT_MAP_U16_RO': (None, True, True, True, ), 174 176 'IEM_MC_MEM_FLAT_MAP_U16_RW': (None, True, True, True, ), 177 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC': (None, True, True, True, ), 175 178 'IEM_MC_MEM_FLAT_MAP_U32_RO': (None, True, True, True, ), 176 179 'IEM_MC_MEM_FLAT_MAP_U32_RW': (None, True, True, True, ), 180 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC': (None, True, True, True, ), 177 181 'IEM_MC_MEM_FLAT_MAP_U64_RO': (None, True, True, True, ), 178 182 'IEM_MC_MEM_FLAT_MAP_U64_RW': (None, True, True, True, ), 183 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC': (None, True, True, True, ), 179 184 'IEM_MC_MEM_FLAT_MAP_U128_RW': (None, True, True, True, ), 180 185 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE': (None, True, True, False, ), … … 398 403 elif oStmt.sName in ('IEM_MC_MEM_COMMIT_AND_UNMAP_RW', 'IEM_MC_MEM_COMMIT_AND_UNMAP_RO', 399 404 'IEM_MC_MEM_COMMIT_AND_UNMAP_WO', 'IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO', 405 'IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC', 400 406 'IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO'): 401 407 # -
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
r102904 r102977 2228 2228 2229 2229 /** 2230 * Used by TB code to map unsigned 8-bit data for atomic read-write w/ 2231 * segmentation. 2232 */ 2233 IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemMapDataU8Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, 2234 RTGCPTR GCPtrMem, uint8_t iSegReg)) 2235 { 2236 #ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED 2237 return iemMemMapDataU8AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2238 #else 2239 return iemMemMapDataU8AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2240 #endif 2241 } 2242 2243 2244 /** 2230 2245 * Used by TB code to map unsigned 8-bit data read-write w/ segmentation. 2231 2246 */ … … 2270 2285 2271 2286 /** 2287 * Used by TB code to map unsigned 16-bit data for atomic read-write w/ 2288 * segmentation. 2289 */ 2290 IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemMapDataU16Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, 2291 RTGCPTR GCPtrMem, uint8_t iSegReg)) 2292 { 2293 #ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED 2294 return iemMemMapDataU16AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2295 #else 2296 return iemMemMapDataU16AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2297 #endif 2298 } 2299 2300 2301 /** 2272 2302 * Used by TB code to map unsigned 16-bit data read-write w/ segmentation. 2273 2303 */ … … 2312 2342 2313 2343 /** 2344 * Used by TB code to map unsigned 32-bit data for atomic read-write w/ 2345 * segmentation. 2346 */ 2347 IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemMapDataU32Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, 2348 RTGCPTR GCPtrMem, uint8_t iSegReg)) 2349 { 2350 #ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED 2351 return iemMemMapDataU32AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2352 #else 2353 return iemMemMapDataU32AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2354 #endif 2355 } 2356 2357 2358 /** 2314 2359 * Used by TB code to map unsigned 32-bit data read-write w/ segmentation. 2315 2360 */ … … 2354 2399 2355 2400 /** 2401 * Used by TB code to map unsigned 64-bit data for atomic read-write w/ 2402 * segmentation. 2403 */ 2404 IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemMapDataU64Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, 2405 RTGCPTR GCPtrMem, uint8_t iSegReg)) 2406 { 2407 #ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED 2408 return iemMemMapDataU64AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2409 #else 2410 return iemMemMapDataU64AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2411 #endif 2412 } 2413 2414 2415 /** 2356 2416 * Used by TB code to map unsigned 64-bit data read-write w/ segmentation. 2357 2417 */ … … 2419 2479 #else 2420 2480 return iemMemMapDataD80WoJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2481 #endif 2482 } 2483 2484 2485 /** 2486 * Used by TB code to map unsigned 128-bit data for atomic read-write w/ 2487 * segmentation. 2488 */ 2489 IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemMapDataU128Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, 2490 RTGCPTR GCPtrMem, uint8_t iSegReg)) 2491 { 2492 #ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED 2493 return iemMemMapDataU128AtSafeJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2494 #else 2495 return iemMemMapDataU128AtJmp(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem); 2421 2496 #endif 2422 2497 } … … 2470 2545 2471 2546 /** 2547 * Used by TB code to map unsigned 8-bit data for atomic read-write w/ flat 2548 * address. 
2549 */ 2550 IEM_DECL_NATIVE_HLP_DEF(uint8_t *, iemNativeHlpMemFlatMapDataU8Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem)) 2551 { 2552 #ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED 2553 return iemMemMapDataU8AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem); 2554 #else 2555 return iemMemFlatMapDataU8AtJmp(pVCpu, pbUnmapInfo, GCPtrMem); 2556 #endif 2557 } 2558 2559 2560 /** 2472 2561 * Used by TB code to map unsigned 8-bit data read-write w/ flat address. 2473 2562 */ … 2509 2598 2510 2599 /** 2600 * Used by TB code to map unsigned 16-bit data for atomic read-write w/ flat 2601 * address. 2602 */ 2603 IEM_DECL_NATIVE_HLP_DEF(uint16_t *, iemNativeHlpMemFlatMapDataU16Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem)) 2604 { 2605 #ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED 2606 return iemMemMapDataU16AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem); 2607 #else 2608 return iemMemFlatMapDataU16AtJmp(pVCpu, pbUnmapInfo, GCPtrMem); 2609 #endif 2610 } 2611 2612 2613 /** 2511 2614 * Used by TB code to map unsigned 16-bit data read-write w/ flat address. 2512 2615 */ … 2548 2651 2549 2653 /** 2654 * Used by TB code to map unsigned 32-bit data for atomic read-write w/ flat 2655 * address. 2656 */ 2657 IEM_DECL_NATIVE_HLP_DEF(uint32_t *, iemNativeHlpMemFlatMapDataU32Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem)) 2658 { 2659 #ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED 2660 return iemMemMapDataU32AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem); 2661 #else 2662 return iemMemFlatMapDataU32AtJmp(pVCpu, pbUnmapInfo, GCPtrMem); 2663 #endif 2664 } 2665 2666 2667 /** 2550 2668 * Used by TB code to map unsigned 32-bit data read-write w/ flat address. 2551 2669 */ … 2587 2704 2588 2705 /** 2706 * Used by TB code to map unsigned 64-bit data for atomic read-write w/ flat 2707 * address. 2708 */ 2709 IEM_DECL_NATIVE_HLP_DEF(uint64_t *, iemNativeHlpMemFlatMapDataU64Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem)) 2710 { 2711 #ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED 2712 return iemMemMapDataU64AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem); 2713 #else 2714 return iemMemFlatMapDataU64AtJmp(pVCpu, pbUnmapInfo, GCPtrMem); 2715 #endif 2716 } 2717 2718 2719 /** 2589 2720 * Used by TB code to map unsigned 64-bit data read-write w/ flat address. 2590 2721 */ … 2647 2778 #else 2648 2779 return iemMemFlatMapDataD80WoJmp(pVCpu, pbUnmapInfo, GCPtrMem); 2780 #endif 2781 } 2782 2783 2784 /** 2785 * Used by TB code to map unsigned 128-bit data for atomic read-write w/ flat 2786 * address. 2787 */ 2788 IEM_DECL_NATIVE_HLP_DEF(RTUINT128U *, iemNativeHlpMemFlatMapDataU128Atomic,(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, RTGCPTR GCPtrMem)) 2789 { 2790 #ifdef IEMNATIVE_WITH_TLB_LOOKUP_MAPPED 2791 return iemMemMapDataU128AtSafeJmp(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem); 2792 #else 2793 return iemMemFlatMapDataU128AtJmp(pVCpu, pbUnmapInfo, GCPtrMem); 2649 2794 #endif 2650 2795 } … 2693 2838 * Helpers: Commit, rollback & unmap * 2694 2839 *********************************************************************************************************************************/ 2840 2841 /** 2842 * Used by TB code to commit and unmap an atomic read-write memory mapping. 
2843 */ 2844 IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCommitAndUnmapAtomic,(PVMCPUCC pVCpu, uint8_t bUnmapInfo)) 2845 { 2846 return iemMemCommitAndUnmapAtSafeJmp(pVCpu, bUnmapInfo); 2847 } 2848 2695 2849 2696 2850 /** … … 11903 12057 *********************************************************************************************************************************/ 11904 12058 12059 #define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 12060 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \ 12061 IEM_ACCESS_DATA_ATOMIC, 0 /*fAlignMask*/, \ 12062 (uintptr_t)iemNativeHlpMemMapDataU8Atomic, pCallEntry->idxInstr) 12063 11905 12064 #define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11906 12065 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \ 11907 IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, 0 /*fAlignMask*/, \12066 IEM_ACCESS_DATA_RW, 0 /*fAlignMask*/, \ 11908 12067 (uintptr_t)iemNativeHlpMemMapDataU8Rw, pCallEntry->idxInstr) 11909 12068 11910 12069 #define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11911 12070 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \ 11912 IEM_ACCESS_ TYPE_WRITE, 0 /*fAlignMask*/, \12071 IEM_ACCESS_DATA_W, 0 /*fAlignMask*/, \ 11913 12072 (uintptr_t)iemNativeHlpMemMapDataU8Wo, pCallEntry->idxInstr) \ 11914 12073 11915 12074 #define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11916 12075 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \ 11917 IEM_ACCESS_ TYPE_READ, 0 /*fAlignMask*/, \12076 IEM_ACCESS_DATA_R, 0 /*fAlignMask*/, \ 11918 12077 (uintptr_t)iemNativeHlpMemMapDataU8Ro, pCallEntry->idxInstr) 11919 12078 12079 12080 #define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 12081 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \ 12082 IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1 /*fAlignMask*/, \ 12083 (uintptr_t)iemNativeHlpMemMapDataU16Atomic, pCallEntry->idxInstr) 11920 12084 11921 12085 #define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11922 12086 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \ 11923 IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, sizeof(uint16_t) - 1 /*fAlignMask*/, \12087 IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1 /*fAlignMask*/, \ 11924 12088 (uintptr_t)iemNativeHlpMemMapDataU16Rw, pCallEntry->idxInstr) 11925 12089 11926 12090 #define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11927 12091 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \ 11928 IEM_ACCESS_ TYPE_WRITE, sizeof(uint16_t) - 1 /*fAlignMask*/, \12092 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask*/, \ 11929 12093 (uintptr_t)iemNativeHlpMemMapDataU16Wo, pCallEntry->idxInstr) \ 11930 12094 11931 12095 #define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11932 12096 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \ 11933 IEM_ACCESS_ TYPE_READ, sizeof(uint16_t) - 1 /*fAlignMask*/, \12097 IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1 /*fAlignMask*/, \ 11934 12098 (uintptr_t)iemNativeHlpMemMapDataU16Ro, 
pCallEntry->idxInstr) 11935 12099 11936 12100 #define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11937 12101 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(int16_t), \ 11938 IEM_ACCESS_ TYPE_WRITE, sizeof(uint16_t) - 1 /*fAlignMask*/, \12102 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask*/, \ 11939 12103 (uintptr_t)iemNativeHlpMemMapDataU16Wo, pCallEntry->idxInstr) \ 11940 12104 12105 12106 #define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 12107 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \ 12108 IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1 /*fAlignMask*/, \ 12109 (uintptr_t)iemNativeHlpMemMapDataU32Atomic, pCallEntry->idxInstr) 11941 12110 11942 12111 #define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11943 12112 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \ 11944 IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, sizeof(uint32_t) - 1 /*fAlignMask*/, \12113 IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1 /*fAlignMask*/, \ 11945 12114 (uintptr_t)iemNativeHlpMemMapDataU32Rw, pCallEntry->idxInstr) 11946 12115 11947 12116 #define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11948 12117 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \ 11949 IEM_ACCESS_ TYPE_WRITE, sizeof(uint32_t) - 1 /*fAlignMask*/, \12118 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask*/, \ 11950 12119 (uintptr_t)iemNativeHlpMemMapDataU32Wo, pCallEntry->idxInstr) \ 11951 12120 11952 12121 #define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11953 12122 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \ 11954 IEM_ACCESS_ TYPE_READ, sizeof(uint32_t) - 1 /*fAlignMask*/, \12123 IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1 /*fAlignMask*/, \ 11955 12124 (uintptr_t)iemNativeHlpMemMapDataU32Ro, pCallEntry->idxInstr) 11956 12125 11957 12126 #define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11958 12127 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(int32_t), \ 11959 IEM_ACCESS_ TYPE_WRITE, sizeof(uint32_t) - 1 /*fAlignMask*/, \12128 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask*/, \ 11960 12129 (uintptr_t)iemNativeHlpMemMapDataU32Wo, pCallEntry->idxInstr) \ 11961 12130 12131 12132 #define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 12133 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \ 12134 IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 12135 (uintptr_t)iemNativeHlpMemMapDataU64Atomic, pCallEntry->idxInstr) 11962 12136 11963 12137 #define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11964 12138 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \ 11965 IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, sizeof(uint64_t) - 1 /*fAlignMask*/, \12139 IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 11966 12140 (uintptr_t)iemNativeHlpMemMapDataU64Rw, pCallEntry->idxInstr) 11967 11968 12141 #define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11969 12142 off = iemNativeEmitMemMapCommon(pReNative, off, 
a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \ 11970 IEM_ACCESS_ TYPE_WRITE, sizeof(uint64_t) - 1 /*fAlignMask*/, \12143 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 11971 12144 (uintptr_t)iemNativeHlpMemMapDataU64Wo, pCallEntry->idxInstr) \ 11972 12145 11973 12146 #define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11974 12147 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \ 11975 IEM_ACCESS_ TYPE_READ, sizeof(uint64_t) - 1 /*fAlignMask*/, \12148 IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 11976 12149 (uintptr_t)iemNativeHlpMemMapDataU64Ro, pCallEntry->idxInstr) 11977 12150 11978 12151 #define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11979 12152 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(int64_t), \ 11980 IEM_ACCESS_ TYPE_WRITE, sizeof(uint64_t) - 1 /*fAlignMask*/, \12153 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 11981 12154 (uintptr_t)iemNativeHlpMemMapDataU64Wo, pCallEntry->idxInstr) \ 11982 12155 … … 11984 12157 #define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11985 12158 off = iemNativeEmitMemMapCommon(pReNative, off, a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTFLOAT80U), \ 11986 IEM_ACCESS_ TYPE_WRITE, sizeof(uint64_t) - 1 /*fAlignMask*/, \12159 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 11987 12160 (uintptr_t)iemNativeHlpMemMapDataR80Wo, pCallEntry->idxInstr) \ 11988 12161 11989 12162 #define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11990 12163 off = iemNativeEmitMemMapCommon(pReNative, off, a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTFLOAT80U), \ 11991 IEM_ACCESS_ TYPE_WRITE, sizeof(uint64_t) - 1 /*fAlignMask*/, /** @todo check BCD align */ \12164 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, /** @todo check BCD align */ \ 11992 12165 (uintptr_t)iemNativeHlpMemMapDataD80Wo, pCallEntry->idxInstr) \ 11993 12166 12167 12168 #define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 12169 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \ 12170 IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \ 12171 (uintptr_t)iemNativeHlpMemMapDataU128Atomic, pCallEntry->idxInstr) 11994 12172 11995 12173 #define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 11996 12174 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \ 11997 IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \12175 IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \ 11998 12176 (uintptr_t)iemNativeHlpMemMapDataU128Rw, pCallEntry->idxInstr) 11999 12177 12000 12178 #define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 12001 12179 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \ 12002 IEM_ACCESS_ TYPE_WRITE, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \12180 IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \ 12003 12181 (uintptr_t)iemNativeHlpMemMapDataU128Wo, pCallEntry->idxInstr) \ 12004 12182 12005 12183 #define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 12006 12184 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, 
a_GCPtrMem, sizeof(RTUINT128U), \ 12007 IEM_ACCESS_ TYPE_READ, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \12185 IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \ 12008 12186 (uintptr_t)iemNativeHlpMemMapDataU128Ro, pCallEntry->idxInstr) 12009 12187 12010 12188 12189 12190 #define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 12191 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \ 12192 IEM_ACCESS_DATA_ATOMIC, 0 /*fAlignMask*/, \ 12193 (uintptr_t)iemNativeHlpMemFlatMapDataU8Atomic, pCallEntry->idxInstr) 12011 12194 12012 12195 #define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 12013 12196 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \ 12014 IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, 0 /*fAlignMask*/, \12197 IEM_ACCESS_DATA_RW, 0 /*fAlignMask*/, \ 12015 12198 (uintptr_t)iemNativeHlpMemFlatMapDataU8Rw, pCallEntry->idxInstr) 12016 12199 12017 12200 #define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 12018 12201 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \ 12019 IEM_ACCESS_ TYPE_WRITE, 0 /*fAlignMask*/, \12202 IEM_ACCESS_DATA_W, 0 /*fAlignMask*/, \ 12020 12203 (uintptr_t)iemNativeHlpMemFlatMapDataU8Wo, pCallEntry->idxInstr) \ 12021 12204 12022 12205 #define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 12023 12206 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \ 12024 IEM_ACCESS_ TYPE_READ, 0 /*fAlignMask*/, \12207 IEM_ACCESS_DATA_R, 0 /*fAlignMask*/, \ 12025 12208 (uintptr_t)iemNativeHlpMemFlatMapDataU8Ro, pCallEntry->idxInstr) 12026 12209 12210 12211 #define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 12212 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \ 12213 IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1 /*fAlignMask*/, \ 12214 (uintptr_t)iemNativeHlpMemFlatMapDataU16Atomic, pCallEntry->idxInstr) 12027 12215 12028 12216 #define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 12029 12217 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \ 12030 IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, sizeof(uint16_t) - 1 /*fAlignMask*/, \12218 IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1 /*fAlignMask*/, \ 12031 12219 (uintptr_t)iemNativeHlpMemFlatMapDataU16Rw, pCallEntry->idxInstr) 12032 12220 12033 12221 #define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 12034 12222 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \ 12035 IEM_ACCESS_ TYPE_WRITE, sizeof(uint16_t) - 1 /*fAlignMask*/, \12223 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask*/, \ 12036 12224 (uintptr_t)iemNativeHlpMemFlatMapDataU16Wo, pCallEntry->idxInstr) \ 12037 12225 12038 12226 #define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 12039 12227 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \ 12040 IEM_ACCESS_ TYPE_READ, sizeof(uint16_t) - 1 /*fAlignMask*/, \12228 IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1 /*fAlignMask*/, \ 12041 12229 (uintptr_t)iemNativeHlpMemFlatMapDataU16Ro, pCallEntry->idxInstr) 12042 12230 12043 12231 #define 
IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \ 12044 12232 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(int16_t), \ 12045 IEM_ACCESS_ TYPE_WRITE, sizeof(uint16_t) - 1 /*fAlignMask*/, \12233 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask*/, \ 12046 12234 (uintptr_t)iemNativeHlpMemFlatMapDataU16Wo, pCallEntry->idxInstr) \ 12047 12235 12236 12237 #define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 12238 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \ 12239 IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1 /*fAlignMask*/, \ 12240 (uintptr_t)iemNativeHlpMemFlatMapDataU32Atomic, pCallEntry->idxInstr) 12048 12241 12049 12242 #define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 12050 12243 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \ 12051 IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, sizeof(uint32_t) - 1 /*fAlignMask*/, \12244 IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1 /*fAlignMask*/, \ 12052 12245 (uintptr_t)iemNativeHlpMemFlatMapDataU32Rw, pCallEntry->idxInstr) 12053 12246 12054 12247 #define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 12055 12248 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \ 12056 IEM_ACCESS_ TYPE_WRITE, sizeof(uint32_t) - 1 /*fAlignMask*/, \12249 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask*/, \ 12057 12250 (uintptr_t)iemNativeHlpMemFlatMapDataU32Wo, pCallEntry->idxInstr) \ 12058 12251 12059 12252 #define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 12060 12253 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \ 12061 IEM_ACCESS_ TYPE_READ, sizeof(uint32_t) - 1 /*fAlignMask*/, \12254 IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1 /*fAlignMask*/, \ 12062 12255 (uintptr_t)iemNativeHlpMemFlatMapDataU32Ro, pCallEntry->idxInstr) 12063 12256 12064 12257 #define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \ 12065 12258 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(int32_t), \ 12066 IEM_ACCESS_ TYPE_WRITE, sizeof(uint32_t) - 1 /*fAlignMask*/, \12259 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask*/, \ 12067 12260 (uintptr_t)iemNativeHlpMemFlatMapDataU32Wo, pCallEntry->idxInstr) \ 12068 12261 12262 12263 #define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 12264 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \ 12265 IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 12266 (uintptr_t)iemNativeHlpMemFlatMapDataU64Atomic, pCallEntry->idxInstr) 12069 12267 12070 12268 #define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 12071 12269 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \ 12072 IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, sizeof(uint64_t) - 1 /*fAlignMask*/, \12270 IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 12073 12271 (uintptr_t)iemNativeHlpMemFlatMapDataU64Rw, pCallEntry->idxInstr) 12074 12272 12075 12273 #define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 12076 12274 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, 
a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \ 12077 IEM_ACCESS_ TYPE_WRITE, sizeof(uint64_t) - 1 /*fAlignMask*/, \12275 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 12078 12276 (uintptr_t)iemNativeHlpMemFlatMapDataU64Wo, pCallEntry->idxInstr) \ 12079 12277 12080 12278 #define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 12081 12279 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \ 12082 IEM_ACCESS_ TYPE_READ, sizeof(uint64_t) - 1 /*fAlignMask*/, \12280 IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 12083 12281 (uintptr_t)iemNativeHlpMemFlatMapDataU64Ro, pCallEntry->idxInstr) 12084 12282 12085 12283 #define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \ 12086 12284 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(int64_t), \ 12087 IEM_ACCESS_ TYPE_WRITE, sizeof(uint64_t) - 1 /*fAlignMask*/, \12285 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 12088 12286 (uintptr_t)iemNativeHlpMemFlatMapDataU64Wo, pCallEntry->idxInstr) \ 12089 12287 … … 12091 12289 #define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \ 12092 12290 off = iemNativeEmitMemMapCommon(pReNative, off, a_pr80Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTFLOAT80U), \ 12093 IEM_ACCESS_ TYPE_WRITE, sizeof(uint64_t) - 1 /*fAlignMask*/, \12291 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, \ 12094 12292 (uintptr_t)iemNativeHlpMemFlatMapDataR80Wo, pCallEntry->idxInstr) \ 12095 12293 12096 12294 #define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \ 12097 12295 off = iemNativeEmitMemMapCommon(pReNative, off, a_pd80Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTFLOAT80U), \ 12098 IEM_ACCESS_ TYPE_WRITE, sizeof(uint64_t) - 1 /*fAlignMask*/, /** @todo check BCD align */ \12296 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask*/, /** @todo check BCD align */ \ 12099 12297 (uintptr_t)iemNativeHlpMemFlatMapDataD80Wo, pCallEntry->idxInstr) \ 12100 12298 12299 12300 #define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 12301 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \ 12302 IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \ 12303 (uintptr_t)iemNativeHlpMemFlatMapDataU128Atomic, pCallEntry->idxInstr) 12101 12304 12102 12305 #define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 12103 12306 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \ 12104 IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \12307 IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \ 12105 12308 (uintptr_t)iemNativeHlpMemFlatMapDataU128Rw, pCallEntry->idxInstr) 12106 12309 12107 12310 #define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 12108 12311 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \ 12109 IEM_ACCESS_ TYPE_WRITE, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \12312 IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \ 12110 12313 (uintptr_t)iemNativeHlpMemFlatMapDataU128Wo, pCallEntry->idxInstr) \ 12111 12314 12112 12315 #define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 12113 12316 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, 
a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \ 12114 IEM_ACCESS_ TYPE_READ, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \12317 IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1 /*fAlignMask*/, \ 12115 12318 (uintptr_t)iemNativeHlpMemFlatMapDataU128Ro, pCallEntry->idxInstr) 12116 12319 … … 12144 12347 12145 12348 #ifdef VBOX_STRICT 12349 # define IEM_MAP_HLP_FN_NO_AT(a_fAccess, a_fnBase) \ 12350 ( ((a_fAccess) & (IEM_ACCESS_TYPE_MASK | IEM_ACCESS_ATOMIC)) == (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ) \ 12351 ? (uintptr_t)RT_CONCAT(a_fnBase,Rw) \ 12352 : ((a_fAccess) & (IEM_ACCESS_TYPE_MASK | IEM_ACCESS_ATOMIC)) == IEM_ACCESS_TYPE_READ \ 12353 ? (uintptr_t)RT_CONCAT(a_fnBase,Ro) : (uintptr_t)RT_CONCAT(a_fnBase,Wo) ) 12146 12354 # define IEM_MAP_HLP_FN(a_fAccess, a_fnBase) \ 12147 ( ((a_fAccess) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ) \ 12148 ? (uintptr_t)RT_CONCAT(a_fnBase,Rw) \ 12149 : ((a_fAccess) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ \ 12150 ? (uintptr_t)RT_CONCAT(a_fnBase,Ro) : (uintptr_t)RT_CONCAT(a_fnBase,Wo) ) 12355 ( ((a_fAccess) & (IEM_ACCESS_TYPE_MASK | IEM_ACCESS_ATOMIC)) == (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ | IEM_ACCESS_ATOMIC) \ 12356 ? (uintptr_t)RT_CONCAT(a_fnBase,Atomic) \ 12357 : IEM_MAP_HLP_FN_NO_AT(a_fAccess, a_fnBase) ) 12151 12358 12152 12359 if (iSegReg == UINT8_MAX) … … 12168 12375 case 16: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU128)); break; 12169 12376 # if 0 12170 case 32: Assert(pfnFunction == IEM_MAP_HLP_FN (fAccess, iemNativeHlpMemFlatMapDataU256)); break;12171 case 64: Assert(pfnFunction == IEM_MAP_HLP_FN (fAccess, iemNativeHlpMemFlatMapDataU512)); break;12377 case 32: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemFlatMapDataU256)); break; 12378 case 64: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemFlatMapDataU512)); break; 12172 12379 # endif 12173 12380 default: AssertFailed(); break; … … 12190 12397 case 16: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU128)); break; 12191 12398 # if 0 12192 case 32: Assert(pfnFunction == IEM_MAP_HLP_FN (fAccess, iemNativeHlpMemMapDataU256)); break;12193 case 64: Assert(pfnFunction == IEM_MAP_HLP_FN (fAccess, iemNativeHlpMemMapDataU512)); break;12399 case 32: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemMapDataU256)); break; 12400 case 64: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemMapDataU512)); break; 12194 12401 # endif 12195 12402 default: AssertFailed(); break; … … 12197 12404 } 12198 12405 # undef IEM_MAP_HLP_FN 12406 # undef IEM_MAP_HLP_FN_NO_AT 12199 12407 #endif 12200 12408 … … 12353 12561 12354 12562 12563 #define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) \ 12564 off = iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_DATA_ATOMIC, \ 12565 (uintptr_t)iemNativeHlpMemCommitAndUnmapAtomic, pCallEntry->idxInstr) 12566 12355 12567 #define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) \ 12356 off = iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_ TYPE_READ | IEM_ACCESS_TYPE_WRITE, \12568 off = iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_DATA_RW, \ 12357 12569 (uintptr_t)iemNativeHlpMemCommitAndUnmapRw, pCallEntry->idxInstr) 12358 12570 12359 12571 #define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo) \ 12360 off = iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_ TYPE_WRITE, \12572 off = 
iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_DATA_W, \ 12361 12573 (uintptr_t)iemNativeHlpMemCommitAndUnmapWo, pCallEntry->idxInstr) 12362 12574 12363 12575 #define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) \ 12364 off = iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_ TYPE_READ, \12576 off = iemNativeEmitMemCommitAndUnmap(pReNative, off, (a_bMapInfo), IEM_ACCESS_DATA_R, \ 12365 12577 (uintptr_t)iemNativeHlpMemCommitAndUnmapRo, pCallEntry->idxInstr) 12366 12578 … … 12377 12589 || pReNative->Core.aVars[idxVarUnmapInfo].idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS); /* must be initialized */ 12378 12590 #ifdef VBOX_STRICT 12379 switch (fAccess & IEM_ACCESS_TYPE_MASK) 12380 { 12381 case IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE: Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapRw); break; 12382 case IEM_ACCESS_TYPE_WRITE: Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapWo); break; 12383 case IEM_ACCESS_TYPE_READ: Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapRo); break; 12591 switch (fAccess & (IEM_ACCESS_TYPE_MASK | IEM_ACCESS_ATOMIC)) 12592 { 12593 case IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_ATOMIC: 12594 Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapAtomic); break; 12595 case IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE: 12596 Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapRw); break; 12597 case IEM_ACCESS_TYPE_WRITE: 12598 Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapWo); break; 12599 case IEM_ACCESS_TYPE_READ: 12600 Assert(pfnFunction == (uintptr_t)iemNativeHlpMemCommitAndUnmapRo); break; 12384 12601 default: AssertFailed(); 12385 12602 } -
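Under VBOX_STRICT the pfnFunction cross-checks now have to distinguish the atomic helpers, which is what the IEM_MAP_HLP_FN/IEM_MAP_HLP_FN_NO_AT split above is for. Tracing the selection by hand for the flat dword helpers (illustrative; it relies only on the flag aliases defined in the IEMInternal.h hunk below):

    /* IEM_ACCESS_DATA_ATOMIC has TYPE_READ, TYPE_WRITE and ATOMIC all set, so
       the first arm of IEM_MAP_HLP_FN matches and the Atomic helper is expected: */
    Assert(IEM_MAP_HLP_FN(IEM_ACCESS_DATA_ATOMIC, iemNativeHlpMemFlatMapDataU32)
           == (uintptr_t)iemNativeHlpMemFlatMapDataU32Atomic);
    /* IEM_ACCESS_DATA_RW leaves ATOMIC clear, so it falls through to the
       _NO_AT macro and resolves to the plain read-write helper: */
    Assert(IEM_MAP_HLP_FN(IEM_ACCESS_DATA_RW, iemNativeHlpMemFlatMapDataU32)
           == (uintptr_t)iemNativeHlpMemFlatMapDataU32Rw);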
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
r102883 r102977 882 882 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ), 883 883 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ), 884 'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ), 884 885 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ), 885 886 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ), 886 887 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ), 888 'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ), 887 889 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ), 888 890 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ), 889 891 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ), 892 'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ), 890 893 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ), 891 894 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ), 892 895 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ), 896 'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ), 893 897 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ), 894 898 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ), 895 899 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ), 900 'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ), 896 901 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ), 897 902 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ), -
trunk/src/VBox/VMM/VMMAll/IEMAllThrdRecompiler.cpp
r102876 r102977 2665 2665 * Init the execution environment. 2666 2666 */ 2667 #ifdef RT_ARCH_ARM64 /** @todo ARM64: fix unaligned locked instructions properly. @bugref{10547} */ 2667 #if 1 /** @todo this seems like a good idea, however if we ever share memory 2668 * directly with other threads on the host, it isn't necessarily... */ 2668 2669 if (pVM->cCpus == 1) 2669 2670 iemInitExec(pVCpu, IEM_F_X86_DISREGARD_LOCK /*fExecOpts*/); -
trunk/src/VBox/VMM/VMMR3/EM.cpp
r102949 r102977 706 706 #endif /* LOG_ENABLED || VBOX_STRICT */ 707 707 708 709 708 #if !defined(VBOX_VMM_TARGET_ARMV8) 709 710 710 /** 711 711 * Handle pending ring-3 I/O port write. … … 851 851 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu); 852 852 } 853 853 854 #endif /* VBOX_VMM_TARGET_ARMV8 */ 854 855 855 856 856 /** … … 893 893 rc = VINF_EM_DBG_STEPPED; 894 894 } 895 #ifndef VBOX_VMM_TARGET_ARMV8 896 if (rc != VINF_EM_EMULATE_SPLIT_LOCK) 897 { /* likely */ } 898 else 899 { 900 rc = emR3ExecuteSplitLockInstruction(pVM, pVCpu); 901 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE) 902 rc = VINF_EM_DBG_STEPPED; 903 } 904 #endif 895 905 break; 896 906 … … 1116 1126 if (rcStrict != VINF_SUCCESS) 1117 1127 { 1128 #ifndef VBOX_VMM_TARGET_ARMV8 1129 if (rcStrict == VINF_EM_EMULATE_SPLIT_LOCK) 1130 rcStrict = emR3ExecuteSplitLockInstruction(pVM, pVCpu); 1131 #endif 1132 if (rcStrict != VINF_SUCCESS) 1133 { 1118 1134 #if 0 1119 if (RT_LIKELY(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)) 1135 if (RT_LIKELY(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)) 1136 break; 1137 /* Fatal error: */ 1138 #endif 1120 1139 break; 1121 /* Fatal error: */ 1122 #endif 1123 break; 1140 } 1124 1141 } 1125 1142 … … 2556 2573 else if (rc == VINF_SUCCESS) 2557 2574 rc = VINF_EM_RESCHEDULE; /* Need to check whether we can run in HM or NEM again. */ 2575 #ifndef VBOX_VMM_TARGET_ARMV8 2576 if (rc != VINF_EM_EMULATE_SPLIT_LOCK) 2577 { /* likely */ } 2578 else 2579 rc = VBOXSTRICTRC_TODO(emR3ExecuteSplitLockInstruction(pVM, pVCpu)); 2580 #endif 2558 2581 fFFDone = false; 2559 2582 break; -
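The EM.cpp hunks all funnel the same status into the same handler: whenever one of the execution loops (debug stepping, the HM/NEM loop, or the IEM-only loop) gets VINF_EM_EMULATE_SPLIT_LOCK back, it calls emR3ExecuteSplitLockInstruction, which halts the other EMTs via an all-at-once rendezvous and re-runs the offending instruction exclusively. A condensed sketch of the recurring pattern; executeGuest is a hypothetical stand-in for the per-loop execution call, not a real function:

    /* Hedged sketch of the pattern added above, not literal code: */
    VBOXSTRICTRC rcStrict = executeGuest(pVM, pVCpu);            /* IEM/HM/NEM step */
    if (rcStrict == VINF_EM_EMULATE_SPLIT_LOCK)                  /* host can't do this access atomically */
        rcStrict = emR3ExecuteSplitLockInstruction(pVM, pVCpu);  /* all-EMT rendezvous, then retry */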
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
r102850 r102977 291 291 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, 292 292 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu); 293 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, 294 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu); 293 295 294 296 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbMisses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, -
trunk/src/VBox/VMM/include/IEMInline.h
r102876 r102977 56 56 ) 57 57 { 58 if (pVCpu->iem.s.rcPassUp == VINF_SUCCESS) 59 rcStrict = VINF_SUCCESS; 58 rcStrict = pVCpu->iem.s.rcPassUp; 59 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 60 { /* likely */ } 60 61 else 61 {62 62 pVCpu->iem.s.cRetPassUpStatus++; 63 rcStrict = pVCpu->iem.s.rcPassUp;64 }65 63 } 66 64 else if (RT_SUCCESS(rcStrict)) … … 79 77 || rcStrict == VINF_EM_RAW_TO_R3 80 78 || rcStrict == VINF_EM_TRIPLE_FAULT 79 || rcStrict == VINF_EM_EMULATE_SPLIT_LOCK 81 80 || rcStrict == VINF_GIM_R3_HYPERCALL 82 81 /* raw-mode / virt handlers only: */ … … 117 116 pVCpu->iem.s.cRetErrStatuses++; 118 117 } 119 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS) 120 { 121 pVCpu->iem.s.cRetPassUpStatus++; 118 else 119 { 122 120 rcStrict = pVCpu->iem.s.rcPassUp; 121 if (rcStrict != VINF_SUCCESS) 122 pVCpu->iem.s.cRetPassUpStatus++; 123 123 } 124 124 … … 3936 3936 3937 3937 3938 DECL_INLINE_THROW(void) iemMemCommitAndUnmapAtJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP 3939 { 3940 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) 3941 if (RT_LIKELY(bMapInfo == 0)) 3942 return; 3943 # endif 3944 iemMemCommitAndUnmapAtSafeJmp(pVCpu, bMapInfo); 3945 } 3946 3947 3938 3948 DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP 3939 3949 { … … 3991 4001 #endif 3992 4002 4003 #define TMPL_MEM_WITH_ATOMIC_MAPPING 4004 3993 4005 #define TMPL_MEM_TYPE uint8_t 3994 4006 #define TMPL_MEM_TYPE_ALIGN 0 … … 4028 4040 4029 4041 #undef TMPL_MEM_WITH_STACK 4042 #undef TMPL_MEM_WITH_ATOMIC_MAPPING 4030 4043 4031 4044 #define TMPL_MEM_NO_STORE … … 4058 4071 #include "../VMMAll/IEMAllMemRWTmplInline.cpp.h" 4059 4072 4073 #define TMPL_MEM_WITH_ATOMIC_MAPPING 4060 4074 #define TMPL_MEM_TYPE RTUINT128U 4061 4075 #define TMPL_MEM_TYPE_ALIGN 15 … … 4065 4079 #define TMPL_MEM_FMT_DESC "dqword" 4066 4080 #include "../VMMAll/IEMAllMemRWTmplInline.cpp.h" 4081 #undef TMPL_MEM_WITH_ATOMIC_MAPPING 4067 4082 4068 4083 #undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK -
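The instantiation hunks above show the pattern: TMPL_MEM_WITH_ATOMIC_MAPPING is defined around the integer widths that can carry a LOCK prefix (the byte-through-qword block and the RTUINT128U block) and left undefined for the store-only float/BCD types, so the template only emits the At/AtJmp/AtSafeJmp variants where they can actually be reached. For reference, a hypothetical stand-alone dword instantiation would read as follows (sketch only; the TMPL_MEM_FMT_* values are assumptions, and the real u16/u32/u64 parameter blocks sit in the elided context):

    #define TMPL_MEM_WITH_ATOMIC_MAPPING            /* also emit the At* mapping variants */
    #define TMPL_MEM_TYPE        uint32_t
    #define TMPL_MEM_TYPE_ALIGN  3                  /* natural alignment mask */
    #define TMPL_MEM_FN_SUFF     U32
    #define TMPL_MEM_FMT_TYPE    "%#010x"
    #define TMPL_MEM_FMT_DESC    "dword"
    #include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
    #undef  TMPL_MEM_WITH_ATOMIC_MAPPING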
trunk/src/VBox/VMM/include/IEMInternal.h
r102896 r102977 1604 1604 /** Number of times RZ left with instruction commit pending for ring-3. */ 1605 1605 uint32_t cPendingCommit; 1606 /** Number of misaligned (host sense) atomic instruction accesses. */ 1607 uint32_t cMisalignedAtomics; 1606 1608 /** Number of long jumps. */ 1607 1609 uint32_t cLongJumps; … … 1641 1643 uint8_t cLogRelWrMsr; 1642 1644 /** Alignment padding. */ 1643 uint8_t abAlignment9[4 6];1645 uint8_t abAlignment9[42]; 1644 1646 1645 1647 /** @name Recompilation … … 1989 1991 /** Not locked, accessed via the TLB. */ 1990 1992 #define IEM_ACCESS_NOT_LOCKED UINT32_C(0x00001000) 1993 /** Atomic access. 1994 * This enables special alignment checks and the VINF_EM_EMULATE_SPLIT_LOCK 1995 * fallback for misaligned stuff. See @bugref{10547}. */ 1996 #define IEM_ACCESS_ATOMIC UINT32_C(0x00002000) 1991 1997 /** Valid bit mask. */ 1992 #define IEM_ACCESS_VALID_MASK UINT32_C(0x0000 1fff)1998 #define IEM_ACCESS_VALID_MASK UINT32_C(0x00003fff) 1993 1999 /** Shift count for the TLB flags (upper word). */ 1994 2000 #define IEM_ACCESS_SHIFT_TLB_FLAGS 16 1995 2001 2002 /** Atomic read+write data alias. */ 2003 #define IEM_ACCESS_DATA_ATOMIC (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA | IEM_ACCESS_ATOMIC) 1996 2004 /** Read+write data alias. */ 1997 2005 #define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA) … … 5201 5209 #ifdef IEM_WITH_SETJMP 5202 5210 uint8_t *iemMemMapDataU8RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5211 uint8_t *iemMemMapDataU8AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5203 5212 uint8_t *iemMemMapDataU8WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5204 5213 uint8_t const *iemMemMapDataU8RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5205 5214 uint16_t *iemMemMapDataU16RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5215 uint16_t *iemMemMapDataU16AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5206 5216 uint16_t *iemMemMapDataU16WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5207 5217 uint16_t const *iemMemMapDataU16RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5208 5218 uint32_t *iemMemMapDataU32RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5219 uint32_t *iemMemMapDataU32AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5209 5220 uint32_t *iemMemMapDataU32WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5210 5221 uint32_t const *iemMemMapDataU32RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5211 5222 uint64_t *iemMemMapDataU64RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5223 uint64_t *iemMemMapDataU64AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5212 5224 uint64_t *iemMemMapDataU64WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t 
iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5213 5225 uint64_t const *iemMemMapDataU64RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; … … 5219 5231 PCRTPBCD80U iemMemMapDataD80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5220 5232 PRTUINT128U iemMemMapDataU128RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5233 PRTUINT128U iemMemMapDataU128AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5221 5234 PRTUINT128U iemMemMapDataU128WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5222 5235 PCRTUINT128U iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; … … 5224 5237 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5225 5238 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5239 void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5226 5240 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5227 5241 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; -
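Two details in this hunk are easy to miss: the abAlignment9 padding shrinks by exactly the four bytes the new uint32_t counter occupies, and IEM_ACCESS_ATOMIC takes bit 13, which is why the valid mask widens from 0x1fff to 0x3fff. The flag relationships can be pinned down at compile time (illustrative assertions; all three identities follow from the definitions above):

    AssertCompile(IEM_ACCESS_ATOMIC      == UINT32_C(0x00002000));  /* bit 13 */
    AssertCompile(IEM_ACCESS_VALID_MASK  == UINT32_C(0x00003fff));  /* grew by exactly that bit */
    AssertCompile(IEM_ACCESS_DATA_ATOMIC == (IEM_ACCESS_DATA_RW | IEM_ACCESS_ATOMIC));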
trunk/src/VBox/VMM/include/IEMMc.h
r102876 r102977 1508 1508 1509 1509 /** 1510 * Maps guest memory for byte atomic read+write direct (or bounce) buffer 1511 * access. 1512 * 1513 * @param[out] a_pu8Mem Where to return the pointer to the mapping. 1514 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t. 1515 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX! 1516 * @param[in] a_GCPtrMem The memory address. 1517 * @remarks Will return/long jump on errors. 1518 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 1519 */ 1520 #ifndef IEM_WITH_SETJMP 1521 # define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1522 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \ 1523 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0)) 1524 #else 1525 # define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1526 (a_pu8Mem) = iemMemMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1527 #endif 1528 1529 /** 1510 1530 * Maps guest memory for byte read+write direct (or bounce) buffer access. 1511 1531 * … 1565 1585 1566 1586 /** 1587 * Maps guest memory for byte atomic read+write direct (or bounce) buffer 1588 * access, flat address variant. 1589 * 1590 * @param[out] a_pu8Mem Where to return the pointer to the mapping. 1591 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t. 1592 * @param[in] a_GCPtrMem The memory address. 1593 * @remarks Will return/long jump on errors. 1594 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 1595 */ 1596 #ifndef IEM_WITH_SETJMP 1597 # define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1598 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \ 1599 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0)) 1600 #else 1601 # define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1602 (a_pu8Mem) = iemMemFlatMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1603 #endif 1604 1605 /** 1567 1606 * Maps guest memory for byte read+write direct (or bounce) buffer access, flat 1568 1607 * address variant. … 1625 1664 1626 1665 /** 1666 * Maps guest memory for word atomic read+write direct (or bounce) buffer access. 1667 * 1668 * @param[out] a_pu16Mem Where to return the pointer to the mapping. 1669 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t. 1670 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX! 1671 * @param[in] a_GCPtrMem The memory address. 1672 * @remarks Will return/long jump on errors. 1673 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 1674 */ 1675 #ifndef IEM_WITH_SETJMP 1676 # define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1677 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \ 1678 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1)) 1679 #else 1680 # define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1681 (a_pu16Mem) = iemMemMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1682 #endif 1683 1684 /** 1627 1685 * Maps guest memory for word read+write direct (or bounce) buffer access. 1628 1686 * … 1682 1740 1683 1741 /** 1742 * Maps guest memory for word atomic read+write direct (or bounce) buffer 1743 * access, flat address variant. 1744 * 1745 * @param[out] a_pu16Mem Where to return the pointer to the mapping. 
1746 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t. 1747 * @param[in] a_GCPtrMem The memory address. 1748 * @remarks Will return/long jump on errors. 1749 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 1750 */ 1751 #ifndef IEM_WITH_SETJMP 1752 # define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1753 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \ 1754 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1)) 1755 #else 1756 # define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1757 (a_pu16Mem) = iemMemFlatMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1758 #endif 1759 1760 /** 1684 1761 * Maps guest memory for word read+write direct (or bounce) buffer access, flat 1685 1762 * address variant. … 1760 1837 1761 1838 /** 1839 * Maps guest memory for dword atomic read+write direct (or bounce) buffer access. 1840 * 1841 * @param[out] a_pu32Mem Where to return the pointer to the mapping. 1842 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t. 1843 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX! 1844 * @param[in] a_GCPtrMem The memory address. 1845 * @remarks Will return/long jump on errors. 1846 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 1847 */ 1848 #ifndef IEM_WITH_SETJMP 1849 # define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1850 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \ 1851 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1)) 1852 #else 1853 # define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1854 (a_pu32Mem) = iemMemMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1855 #endif 1856 1857 /** 1762 1858 * Maps guest memory for dword read+write direct (or bounce) buffer access. 1763 1859 * … 1817 1913 1818 1914 /** 1915 * Maps guest memory for dword atomic read+write direct (or bounce) buffer 1916 * access, flat address variant. 1917 * 1918 * @param[out] a_pu32Mem Where to return the pointer to the mapping. 1919 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t. 1920 * @param[in] a_GCPtrMem The memory address. 1921 * @remarks Will return/long jump on errors. 1922 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 1923 */ 1924 #ifndef IEM_WITH_SETJMP 1925 # define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1926 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \ 1927 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1)) 1928 #else 1929 # define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1930 (a_pu32Mem) = iemMemFlatMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1931 #endif 1932 1933 /** 1819 1934 * Maps guest memory for dword read+write direct (or bounce) buffer access, 1820 1935 * flat address variant. … 1913 2028 1914 2029 /** 2030 * Maps guest memory for qword atomic read+write direct (or bounce) buffer access. 2031 * 2032 * @param[out] a_pu64Mem Where to return the pointer to the mapping. 2033 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t. 2034 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX! 2035 * @param[in] a_GCPtrMem The memory address. 2036 * @remarks Will return/long jump on errors. 
2037 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 2038 */ 2039 #ifndef IEM_WITH_SETJMP 2040 # define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2041 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \ 2042 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1)) 2043 #else 2044 # define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2045 (a_pu64Mem) = iemMemMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2046 #endif 2047 2048 /** 1915 2049 * Maps guest memory for qword read+write direct (or bounce) buffer access. 1916 2050 * … 1970 2104 1971 2105 /** 2106 * Maps guest memory for qword atomic read+write direct (or bounce) buffer 2107 * access, flat address variant. 2108 * 2109 * @param[out] a_pu64Mem Where to return the pointer to the mapping. 2110 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t. 2111 * @param[in] a_GCPtrMem The memory address. 2112 * @remarks Will return/long jump on errors. 2113 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 2114 */ 2115 #ifndef IEM_WITH_SETJMP 2116 # define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2117 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \ 2118 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1)) 2119 #else 2120 # define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2121 (a_pu64Mem) = iemMemFlatMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2122 #endif 2123 2124 /** 1972 2125 * Maps guest memory for qword read+write direct (or bounce) buffer access, 1973 2126 * flat address variant. … 2066 2219 2067 2220 /** 2221 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer access. 2222 * 2223 * @param[out] a_pu128Mem Where to return the pointer to the mapping. 2224 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t. 2225 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX! 2226 * @param[in] a_GCPtrMem The memory address. 2227 * @remarks Will return/long jump on errors. 2228 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 2229 */ 2230 #ifndef IEM_WITH_SETJMP 2231 # define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2232 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \ 2233 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1)) 2234 #else 2235 # define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2236 (a_pu128Mem) = iemMemMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2237 #endif 2238 2239 /** 2068 2240 * Maps guest memory for dqword read+write direct (or bounce) buffer access. 2069 2241 * … 2123 2295 2124 2296 /** 2297 * Maps guest memory for dqword atomic read+write direct (or bounce) buffer 2298 * access, flat address variant. 2299 * 2300 * @param[out] a_pu128Mem Where to return the pointer to the mapping. 2301 * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t. 2302 * @param[in] a_GCPtrMem The memory address. 2303 * @remarks Will return/long jump on errors. 
* @see IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC 2305 */ 2306 #ifndef IEM_WITH_SETJMP 2307 # define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2308 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), UINT8_MAX, \ 2309 (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1)) 2310 #else 2311 # define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2312 (a_pu128Mem) = iemMemFlatMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2313 #endif 2314 2315 /** 2125 2316 * Maps guest memory for dqword read+write direct (or bounce) buffer access, 2126 2317 * flat address variant. … … 2269 2460 #else 2270 2461 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo)) 2462 #endif 2463 2464 /** Commits the memory and unmaps guest memory previously mapped ATOMIC. 2465 * @remarks May return. 2466 * @note Implicitly frees the a_bMapInfo variable. 2467 */ 2468 #ifndef IEM_WITH_SETJMP 2469 # define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)) 2470 #else 2471 # define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo)) 2271 2472 #endif 2272 2473 -
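Taken together, the new _ATOMIC mapping macros are used exactly like the existing _RW pair: map the guest location, modify it through the returned pointer, then commit. Below is a minimal sketch of a locked read-modify-write built only from the macros added above; the enclosing MC-block boilerplate (IEM_MC_BEGIN/IEM_MC_END and the IEM_MC_LOCAL declarations) is assumed rather than shown, iSegReg and GCPtrEffDst stand for whatever the instruction decoder produced, and the increment is only a stand-in for the real guarded operation:

    /* Sketch only: assumes it sits inside an IEM MC block with pVCpu,
       iSegReg and GCPtrEffDst in scope, as in real instruction bodies. */
    uint32_t *pu32Dst;      /* normally declared via IEM_MC_LOCAL */
    uint8_t   bUnmapInfo;   /* likewise; consumed by the unmap below */
    IEM_MC_MEM_MAP_U32_ATOMIC(pu32Dst, bUnmapInfo, iSegReg, GCPtrEffDst);
    *pu32Dst += 1;          /* stand-in for the actual locked operation */
    IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo);

Note the two expansions visible above: without IEM_WITH_SETJMP both the map and the commit step bail out via IEM_MC_RETURN_ON_FAILURE, while the setjmp variants long jump on error instead.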
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
r102847 r102977 44 44 #endif 45 45 46 #ifdef VBOX_WITH_STATISTICS 46 47 /** Always count instructions for now. */ 47 #define IEMNATIVE_WITH_INSTRUCTION_COUNTING 48 # define IEMNATIVE_WITH_INSTRUCTION_COUNTING 49 #endif 48 50 49 51 -
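The net effect of this hunk is that IEMNATIVE_WITH_INSTRUCTION_COUNTING is now only defined in VBOX_WITH_STATISTICS builds, so any counting code guarded by it drops out of non-statistics builds entirely. A hypothetical consumer, for illustration only (the counter field name is invented, not taken from this changeset):

    #ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
        pReNative->cInstrEmitted += 1; /* hypothetical per-TB counter; only
                                          compiled in statistics builds now */
    #endif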
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r102876 r102977 931 931 #define IEM_MC_POP_GREG_U64(a_iGReg) do { CHK_GREG_IDX(a_iGReg); (void)fMcBegin; } while (0) 932 932 933 #define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pd80Mem); (a_pd80Mem) = NULL; CHK_PTYPE(RTPBCD80U *, a_pd80Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 934 #define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pi16Mem); (a_pi16Mem) = NULL; CHK_PTYPE(int16_t *, a_pi16Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 935 #define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pi32Mem); (a_pi32Mem) = NULL; CHK_PTYPE(int32_t *, a_pi32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 936 #define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pi64Mem); (a_pi64Mem) = NULL; CHK_PTYPE(int64_t *, a_pi64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 937 #define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pr32Mem); (a_pr32Mem) = NULL; CHK_PTYPE(RTFLOAT32U *, a_pr32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 938 #define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pr64Mem); (a_pr64Mem) = NULL; CHK_PTYPE(RTFLOAT64U *, a_pr64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 939 #define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pr80Mem); (a_pr80Mem) = NULL; CHK_PTYPE(RTFLOAT80U *, a_pr80Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 940 #define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu8Mem); (a_pu8Mem) = NULL; CHK_PTYPE(uint8_t *, a_pu8Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 941 #define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu8Mem); (a_pu8Mem) = NULL; CHK_PTYPE(uint8_t const *, a_pu8Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 942 #define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu8Mem); (a_pu8Mem) = NULL; CHK_PTYPE(uint8_t *, a_pu8Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 943 #define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu16Mem); (a_pu16Mem) = NULL; CHK_PTYPE(uint16_t *, a_pu16Mem); CHK_VAR(a_bUnmapInfo); 
CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 944 #define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu16Mem); (a_pu16Mem) = NULL; CHK_PTYPE(uint16_t const *, a_pu16Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 945 #define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu16Mem); (a_pu16Mem) = NULL; CHK_PTYPE(uint16_t *, a_pu16Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 946 #define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu32Mem); (a_pu32Mem) = NULL; CHK_PTYPE(uint32_t *, a_pu32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 947 #define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu32Mem); (a_pu32Mem) = NULL; CHK_PTYPE(uint32_t const *, a_pu32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 948 #define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu32Mem); (a_pu32Mem) = NULL; CHK_PTYPE(uint32_t *, a_pu32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 949 #define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu64Mem); (a_pu64Mem) = NULL; CHK_PTYPE(uint64_t *, a_pu64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 950 #define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu64Mem); (a_pu64Mem) = NULL; CHK_PTYPE(uint64_t const *, a_pu64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 951 #define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu64Mem); (a_pu64Mem) = NULL; CHK_PTYPE(uint64_t *, a_pu64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 952 #define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu128Mem); (a_pu128Mem) = NULL; CHK_PTYPE(RTUINT128U *, a_pu128Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 953 #define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu128Mem); (a_pu128Mem) = NULL; CHK_PTYPE(RTUINT128U const *, a_pu128Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 954 #define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu128Mem); (a_pu128Mem) = 
NULL; CHK_PTYPE(RTUINT128U *, a_pu128Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 955 933 #define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pd80Mem); (a_pd80Mem) = NULL; CHK_PTYPE(RTPBCD80U *, a_pd80Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 934 #define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pi16Mem); (a_pi16Mem) = NULL; CHK_PTYPE(int16_t *, a_pi16Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 935 #define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pi32Mem); (a_pi32Mem) = NULL; CHK_PTYPE(int32_t *, a_pi32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 936 #define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pi64Mem); (a_pi64Mem) = NULL; CHK_PTYPE(int64_t *, a_pi64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 937 #define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pr32Mem); (a_pr32Mem) = NULL; CHK_PTYPE(RTFLOAT32U *, a_pr32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 938 #define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pr64Mem); (a_pr64Mem) = NULL; CHK_PTYPE(RTFLOAT64U *, a_pr64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 939 #define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pr80Mem); (a_pr80Mem) = NULL; CHK_PTYPE(RTFLOAT80U *, a_pr80Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 940 #define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu8Mem); (a_pu8Mem) = NULL; CHK_PTYPE(uint8_t *, a_pu8Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 941 #define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu8Mem); (a_pu8Mem) = NULL; CHK_PTYPE(uint8_t *, a_pu8Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 942 #define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu8Mem); (a_pu8Mem) = NULL; CHK_PTYPE(uint8_t const *, a_pu8Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 943 #define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { 
CHK_VAR(a_pu8Mem); (a_pu8Mem) = NULL; CHK_PTYPE(uint8_t *, a_pu8Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 944 #define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu16Mem); (a_pu16Mem) = NULL; CHK_PTYPE(uint16_t *, a_pu16Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 945 #define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu16Mem); (a_pu16Mem) = NULL; CHK_PTYPE(uint16_t *, a_pu16Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 946 #define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu16Mem); (a_pu16Mem) = NULL; CHK_PTYPE(uint16_t const *, a_pu16Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 947 #define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu16Mem); (a_pu16Mem) = NULL; CHK_PTYPE(uint16_t *, a_pu16Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 948 #define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu32Mem); (a_pu32Mem) = NULL; CHK_PTYPE(uint32_t *, a_pu32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 949 #define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu32Mem); (a_pu32Mem) = NULL; CHK_PTYPE(uint32_t *, a_pu32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 950 #define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu32Mem); (a_pu32Mem) = NULL; CHK_PTYPE(uint32_t const *, a_pu32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 951 #define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu32Mem); (a_pu32Mem) = NULL; CHK_PTYPE(uint32_t *, a_pu32Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 952 #define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu64Mem); (a_pu64Mem) = NULL; CHK_PTYPE(uint64_t *, a_pu64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 953 #define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu64Mem); (a_pu64Mem) = NULL; CHK_PTYPE(uint64_t *, a_pu64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 954 #define 
IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu64Mem); (a_pu64Mem) = NULL; CHK_PTYPE(uint64_t const *, a_pu64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 955 #define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu64Mem); (a_pu64Mem) = NULL; CHK_PTYPE(uint64_t *, a_pu64Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 956 #define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu128Mem); (a_pu128Mem) = NULL; CHK_PTYPE(RTUINT128U *, a_pu128Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 957 #define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu128Mem); (a_pu128Mem) = NULL; CHK_PTYPE(RTUINT128U *, a_pu128Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 958 #define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu128Mem); (a_pu128Mem) = NULL; CHK_PTYPE(RTUINT128U const *, a_pu128Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 959 #define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { CHK_VAR(a_pu128Mem); (a_pu128Mem) = NULL; CHK_PTYPE(RTUINT128U *, a_pu128Mem); CHK_VAR(a_bUnmapInfo); CHK_TYPE(uint8_t, a_bUnmapInfo); a_bUnmapInfo = 1; CHK_GCPTR(a_GCPtrMem); CHK_VAR(a_GCPtrMem); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0) 960 961 #define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo) do { CHK_VAR(a_bMapInfo); CHK_TYPE(uint8_t, a_bMapInfo); (void)fMcBegin; } while (0) 956 962 #define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo) do { CHK_VAR(a_bMapInfo); CHK_TYPE(uint8_t, a_bMapInfo); (void)fMcBegin; } while (0) 957 963 #define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo) do { CHK_VAR(a_bMapInfo); CHK_TYPE(uint8_t, a_bMapInfo); (void)fMcBegin; } while (0)
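The stubs above never execute anything; tstIEMCheckMc.cpp exists so that every IEM_MC invocation in the instruction tables is syntax- and type-checked at compile time, which is why each new _ATOMIC map/unmap macro needs a matching stub here. The CHK_TYPE/CHK_PTYPE helpers amount to assignment-compatibility probes, roughly along these lines (a simplified sketch, not the exact definitions used by the testcase):

    /* Assigning the operand's address to a pointer of the expected type
       fails to compile on a type mismatch; the pointer is otherwise unused. */
    #define MY_CHK_TYPE(a_ExpectedType, a_Param) \
        do { a_ExpectedType const *pCheckType = &(a_Param); (void)pCheckType; } while (0)
    /* Same idea for pointer-typed operands such as a_pu32Mem. */
    #define MY_CHK_PTYPE(a_ExpectedPtrType, a_Param) \
        do { a_ExpectedPtrType pCheckPtrType = (a_Param); (void)pCheckPtrType; } while (0)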