Changeset 104984 in vbox
- Timestamp: Jun 20, 2024 2:07:04 PM (9 months ago)
- svn:sync-xref-src-repo-rev: 163594
- Location: trunk
- Files: 9 edited
trunk/include/iprt/armv8.h
r104728 → r104984

+/**
+ * A64: Encodes TBZ (conditional branch w/ immediate) instructions.
+ *
+ * @returns The encoded instruction.
+ * @param   iImm14      Signed number of instructions to jump (i.e. *4).
+ * @param   iReg        The GPR to check for zero / non-zero value.
+ * @param   iBitNo      The bit to test for.
+ */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrTbz(int32_t iImm14, uint32_t iReg, uint32_t iBitNo)
+{
+    return Armv8A64MkInstrTbzTbnz(false /*fJmpIfNotZero*/, iImm14, iReg, iBitNo);
+}
+
+
+/**
+ * A64: Encodes TBNZ (conditional branch w/ immediate) instructions.
+
+ * @returns The encoded instruction.
+ * @param   iImm14      Signed number of instructions to jump (i.e. *4).
+ * @param   iReg        The GPR to check for zero / non-zero value.
+ * @param   iBitNo      The bit to test for.
+ */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrTbnz(int32_t iImm14, uint32_t iReg, uint32_t iBitNo)
+{
+    return Armv8A64MkInstrTbzTbnz(true /*fJmpIfNotZero*/, iImm14, iReg, iBitNo);
+}
+
+
 /** Armv8 Condition codes. */
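The two wrappers above only fix the fJmpIfNotZero argument of the existing Armv8A64MkInstrTbzTbnz encoder. A minimal usage sketch (not part of the changeset; the register is given as a plain number rather than a named constant):

    /* Encode "TBNZ x0, bit 63, +2 instructions" (branch 8 bytes forward when bit 63
       of register 0 is set), once through the new wrapper and once through the
       underlying encoder; both produce the same 32-bit instruction word. */
    static void sketchEncodeTbnz(void)
    {
        uint32_t const uTbnz = Armv8A64MkInstrTbnz(2 /*iImm14: +2 instructions*/, 0 /*iReg: x0*/, 63 /*iBitNo*/);
        uint32_t const uSame = Armv8A64MkInstrTbzTbnz(true /*fJmpIfNotZero*/, 2 /*iImm14*/, 0 /*iReg*/, 63 /*iBitNo*/);
        Assert(uTbnz == uSame);
        RT_NOREF(uTbnz, uSame);
    }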
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r104956 → r104984

     iemRaiseXcptAdjustState(pVCpu, u8Vector);
 
-    iemRecalcExecModeAndCplFlags(pVCpu);
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu);
 
     return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
r104722 → r104984

     Assert(fEflNew & RT_BIT_32(1));
     IEMMISC_SET_EFL(pVCpu, fEflNew);
+    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_X86_AC) | iemCalcExecAcFlag(pVCpu);
     return iemRegAddToRipAndFinishingClearingRfEx(pVCpu, cbInstr, fEflOld);
 }
…
     pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
 
-    iemRecalcExecModeAndCplFlags(pVCpu);
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu);
 
     /** @todo single stepping */
…
      * mode. */
 
-    iemRecalcExecModeAndCplFlags(pVCpu);
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu);
 
     /* Flush the prefetch buffer. */
…
      * mode. */
 
-    iemRecalcExecModeAndCplFlags(pVCpu);
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu);
 
     /* Flush the prefetch buffer. */
…
     iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
 
-    iemRecalcExecModeAndCplFlags(pVCpu); /* Affects iemRegAddToRspEx and the setting of RSP/SP below. */
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu); /* Affects iemRegAddToRspEx and the setting of RSP/SP below. */
 
     if (cbPop)
…
     pVCpu->cpum.GstCtx.sp = (uint16_t)NewOuterRsp.u;
 
-    iemRecalcExecModeAndCplFlags(pVCpu); /* Affects iemRegAddToRspEx and the setting of RSP/SP below. */
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu); /* Affects iemRegAddToRspEx and the setting of RSP/SP below. */
 
     /** @todo check if the hidden bits are loaded correctly for 64-bit
…
      * mode. */
 
-    iemRecalcExecModeAndCplFlags(pVCpu);
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu);
 }
…
     Assert(uNewFlags & X86_EFL_1);
     IEMMISC_SET_EFL(pVCpu, uNewFlags);
+    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_X86_AC) | iemCalcExecAcFlag(pVCpu);
 
     /* Flush the prefetch buffer. */
…
     pVCpu->cpum.GstCtx.rip = (uint16_t)uNewEip;
     pVCpu->cpum.GstCtx.rsp = uNewEsp; /** @todo check this out! */
-    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
+    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
                        | (3 << IEM_F_X86_CPL_SHIFT)
-                       | IEM_F_MODE_X86_16BIT_PROT_V86;
+                       | IEM_F_MODE_X86_16BIT_PROT_V86
+                       | iemCalcExecAcFlag(pVCpu);
 
     /* Flush the prefetch buffer. */
…
     iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
 
-    iemRecalcExecModeAndCplFlags(pVCpu);
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu);
 
     /* Done! */
…
     pVCpu->cpum.GstCtx.rsp = uNewRsp;
 
-    iemRecalcExecModeAndCplFlags(pVCpu);
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu);
 
     /* Done! */
…
     }
 
-    iemRecalcExecModeAndCplFlags(pVCpu);
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu);
 
     /* Flush the prefetch buffer. */
…
      * word as to what happens if those are not identical (probably bad things).
      */
-    iemRecalcExecModeAndCplFlags(pVCpu);
+    iemRecalcExecModeAndCplAndAcFlags(pVCpu);
     Assert(IEM_IS_16BIT_CODE(pVCpu));
…
     pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
 
-    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
+    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
                        | IEM_F_MODE_X86_64BIT;
 }
…
     pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
 
-    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
+    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
                        | IEM_F_MODE_X86_32BIT_PROT
                        | iemCalc32BitFlatIndicatorEsDs(pVCpu);
…
     if (!f32Bit)
-        pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
+        pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
                            | (3 << IEM_F_X86_CPL_SHIFT)
-                           | IEM_F_MODE_X86_64BIT;
+                           | IEM_F_MODE_X86_64BIT
+                           | iemCalcExecAcFlag(pVCpu);
     else
-        pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
+        pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
                            | (3 << IEM_F_X86_CPL_SHIFT)
                            | IEM_F_MODE_X86_32BIT_PROT
                            /** @todo sort out the SS.BASE/LIM/ATTR claim by AMD and maybe we can switch to
                             *        iemCalc32BitFlatIndicatorDsEs and move this up into the above branch. */
-                           | iemCalc32BitFlatIndicator(pVCpu);
+                           | iemCalc32BitFlatIndicator(pVCpu)
+                           | iemCalcExecAcFlag(pVCpu);
 
     /* Flush the prefetch buffer. */
…
     pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
                                  | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
-    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
+    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
                        | IEM_F_MODE_X86_64BIT;
 }
…
     pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
                                  | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
-    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
+    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
                        | IEM_F_MODE_X86_32BIT_PROT
                        | iemCalc32BitFlatIndicatorEsDs(pVCpu);
…
     pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
                        | (3 << IEM_F_X86_CPL_SHIFT)
-                       | IEM_F_MODE_X86_64BIT;
+                       | IEM_F_MODE_X86_64BIT
+                       | iemCalcExecAcFlag(pVCpu);
 }
 else
…
                        | (3 << IEM_F_X86_CPL_SHIFT)
                        | IEM_F_MODE_X86_32BIT_PROT
-                       | iemCalc32BitFlatIndicatorEsDs(pVCpu);
+                       | iemCalc32BitFlatIndicatorEsDs(pVCpu)
+                       | iemCalcExecAcFlag(pVCpu);
 }
 pVCpu->cpum.GstCtx.cs.u64Base = 0;
…
     /* Update the fExec flags if PE changed. */
     if ((uNewCrX ^ uOldCrX) & X86_CR0_PE)
-        iemRecalcExecModeAndCplFlags(pVCpu);
+        iemRecalcExecModeAndCplAndAcFlags(pVCpu);
 
     /*
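All of the hunks above apply the same pattern: wherever an instruction may change EFLAGS.AC, CPL or CR0 (POPF, IRET, far returns and calls, SYSCALL/SYSRET, SYSENTER/SYSEXIT, MOV CR0), the cached IEM_F_X86_AC bit in fExec is cleared and recomputed. A condensed sketch of that pattern, using only names visible in the diff and assuming it runs inside a cImpl function with pVCpu in scope:

    /* EFLAGS may have changed but mode/CPL did not: only refresh the AC bit. */
    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_X86_AC) | iemCalcExecAcFlag(pVCpu);

    /* Mode and CPL change as well (taken from the V86-entry hunk above): rebuild
       the whole group of bits in one expression. */
    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
                       | (3 << IEM_F_X86_CPL_SHIFT)
                       | IEM_F_MODE_X86_16BIT_PROT_V86
                       | iemCalcExecAcFlag(pVCpu);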
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
r104419 → r104984

     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
     IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
-    IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER,
+    IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_MODE | IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER,
                                 RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xSP),
                                 iemCImpl_popf, pVCpu->iem.s.enmEffOpSize);
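POPF can flip EFLAGS.AC and with it the cached IEM_F_X86_AC execution-mode bit, which is presumably why IEM_CIMPL_F_MODE is now part of its deferral flags. For reference, the combined flag word after this change (names taken from the hunk above; the interpretation of IEM_CIMPL_F_MODE as "may change execution-mode flags" is an assumption):

    uint32_t const fCImplFlags = IEM_CIMPL_F_VMEXIT
                               | IEM_CIMPL_F_RFLAGS
                               | IEM_CIMPL_F_MODE                       /* new in r104984 */
                               | IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER;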
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h
r104956 r104984 6273 6273 DECL_INLINE_THROW(uint32_t) 6274 6274 iemNativeEmitMemFetchStoreDataCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarValue, uint8_t iSegReg, 6275 uint8_t idxVarGCPtrMem, uint8_t cbMem, uint 8_t fAlignMask, IEMNATIVEMITMEMOP enmOp,6275 uint8_t idxVarGCPtrMem, uint8_t cbMem, uint32_t fAlignMaskAndCtl, IEMNATIVEMITMEMOP enmOp, 6276 6276 uintptr_t pfnFunction, uint8_t idxInstr, uint8_t offDisp = 0) 6277 6277 { … … 6296 6296 Assert(cbMem == 1 || cbMem == 2 || cbMem == 4 || cbMem == 8); 6297 6297 #endif 6298 Assert(!(fAlignMaskAndCtl & ~(UINT32_C(0xff) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE))); 6298 6299 AssertCompile(IEMNATIVE_CALL_ARG_GREG_COUNT >= 4); 6299 6300 #ifdef VBOX_STRICT … … 6316 6317 : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU8_Sx_U64 6317 6318 : UINT64_C(0xc000b000a0009000) )); 6319 Assert(!fAlignMaskAndCtl); 6318 6320 break; 6319 6321 case 2: … … 6326 6328 : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU16_Sx_U64 6327 6329 : UINT64_C(0xc000b000a0009000) )); 6330 Assert(fAlignMaskAndCtl <= 1); 6328 6331 break; 6329 6332 case 4: … … 6334 6337 : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFlatFetchDataU32_Sx_U64 6335 6338 : UINT64_C(0xc000b000a0009000) )); 6339 Assert(fAlignMaskAndCtl <= 3); 6336 6340 break; 6337 6341 case 8: … … 6340 6344 : enmOp == kIemNativeEmitMemOp_Fetch ? (uintptr_t)iemNativeHlpMemFlatFetchDataU64 6341 6345 : UINT64_C(0xc000b000a0009000) )); 6346 Assert(fAlignMaskAndCtl <= 7); 6342 6347 break; 6343 6348 #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR … … 6350 6355 && ( pfnFunction == (uintptr_t)iemNativeHlpMemFlatStoreDataU128AlignedSse 6351 6356 || pfnFunction == (uintptr_t)iemNativeHlpMemFlatStoreDataU128NoAc))); 6357 Assert( pfnFunction == (uintptr_t)iemNativeHlpMemFlatFetchDataU128AlignedSse 6358 || pfnFunction == (uintptr_t)iemNativeHlpMemFlatStoreDataU128AlignedSse 6359 ? (fAlignMaskAndCtl & (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)) && (uint8_t)fAlignMaskAndCtl == 15 6360 : fAlignMaskAndCtl <= 15); 6352 6361 break; 6353 6362 case sizeof(RTUINT256U): … … 6358 6367 && ( pfnFunction == (uintptr_t)iemNativeHlpMemFlatStoreDataU256NoAc 6359 6368 || pfnFunction == (uintptr_t)iemNativeHlpMemFlatStoreDataU256AlignedAvx))); 6369 Assert( pfnFunction == (uintptr_t)iemNativeHlpMemFlatFetchDataU256AlignedAvx 6370 || pfnFunction == (uintptr_t)iemNativeHlpMemFlatStoreDataU256AlignedAvx 6371 ? (fAlignMaskAndCtl & IEM_MEMMAP_F_ALIGN_GP) && (uint8_t)fAlignMaskAndCtl == 31 6372 : fAlignMaskAndCtl <= 31); 6360 6373 break; 6361 6374 #endif … … 6378 6391 : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFetchDataU8_Sx_U64 6379 6392 : UINT64_C(0xc000b000a0009000) )); 6393 Assert(!fAlignMaskAndCtl); 6380 6394 break; 6381 6395 case 2: … … 6388 6402 : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFetchDataU16_Sx_U64 6389 6403 : UINT64_C(0xc000b000a0009000) )); 6404 Assert(fAlignMaskAndCtl <= 1); 6390 6405 break; 6391 6406 case 4: … … 6396 6411 : enmOp == kIemNativeEmitMemOp_Fetch_Sx_U64 ? (uintptr_t)iemNativeHlpMemFetchDataU32_Sx_U64 6397 6412 : UINT64_C(0xc000b000a0009000) )); 6413 Assert(fAlignMaskAndCtl <= 3); 6398 6414 break; 6399 6415 case 8: … … 6402 6418 : enmOp == kIemNativeEmitMemOp_Fetch ? 
(uintptr_t)iemNativeHlpMemFetchDataU64 6403 6419 : UINT64_C(0xc000b000a0009000) )); 6420 Assert(fAlignMaskAndCtl <= 7); 6404 6421 break; 6405 6422 #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR … … 6412 6429 && ( pfnFunction == (uintptr_t)iemNativeHlpMemStoreDataU128AlignedSse 6413 6430 || pfnFunction == (uintptr_t)iemNativeHlpMemStoreDataU128NoAc))); 6431 Assert( pfnFunction == (uintptr_t)iemNativeHlpMemFetchDataU128AlignedSse 6432 || pfnFunction == (uintptr_t)iemNativeHlpMemStoreDataU128AlignedSse 6433 ? (fAlignMaskAndCtl & (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)) && (uint8_t)fAlignMaskAndCtl == 15 6434 : fAlignMaskAndCtl <= 15); 6414 6435 break; 6415 6436 case sizeof(RTUINT256U): … … 6420 6441 && ( pfnFunction == (uintptr_t)iemNativeHlpMemStoreDataU256NoAc 6421 6442 || pfnFunction == (uintptr_t)iemNativeHlpMemStoreDataU256AlignedAvx))); 6443 Assert( pfnFunction == (uintptr_t)iemNativeHlpMemFetchDataU256AlignedAvx 6444 || pfnFunction == (uintptr_t)iemNativeHlpMemStoreDataU256AlignedAvx 6445 ? (fAlignMaskAndCtl & IEM_MEMMAP_F_ALIGN_GP) && (uint8_t)fAlignMaskAndCtl == 31 6446 : fAlignMaskAndCtl <= 31); 6422 6447 break; 6423 6448 #endif … … 6670 6695 * TlbLookup: 6671 6696 */ 6672 off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMask ,6697 off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMaskAndCtl, 6673 6698 enmOp == kIemNativeEmitMemOp_Store ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ, 6674 6699 idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult, offDisp); … … 6829 6854 } 6830 6855 #else 6831 RT_NOREF(fAlignMask , idxLabelTlbMiss);6856 RT_NOREF(fAlignMaskAndCtl, idxLabelTlbMiss); 6832 6857 #endif 6833 6858 … … 6846 6871 #define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \ 6847 6872 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u8Dst, a_iSeg, a_GCPtrMem, \ 6848 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch, \6873 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch, \ 6849 6874 (uintptr_t)iemNativeHlpMemFetchDataU8, pCallEntry->idxInstr) 6850 6875 6851 6876 #define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 6852 6877 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, a_iSeg, a_GCPtrMem, \ 6853 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Zx_U16, \6878 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Zx_U16, \ 6854 6879 (uintptr_t)iemNativeHlpMemFetchDataU8, pCallEntry->idxInstr) 6855 6880 6856 6881 #define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 6857 6882 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, a_iSeg, a_GCPtrMem, \ 6858 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Zx_U32, \6883 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Zx_U32, \ 6859 6884 (uintptr_t)iemNativeHlpMemFetchDataU8, pCallEntry->idxInstr) 6860 6885 6861 6886 #define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 6862 6887 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, a_iSeg, a_GCPtrMem, \ 6863 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Zx_U64, \6888 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Zx_U64, \ 6864 6889 (uintptr_t)iemNativeHlpMemFetchDataU8, pCallEntry->idxInstr) 6865 6890 6866 6891 #define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 6867 6892 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, a_iSeg, a_GCPtrMem, \ 6868 
sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Sx_U16, \6893 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Sx_U16, \ 6869 6894 (uintptr_t)iemNativeHlpMemFetchDataU8_Sx_U16, pCallEntry->idxInstr) 6870 6895 6871 6896 #define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 6872 6897 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, a_iSeg, a_GCPtrMem, \ 6873 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Sx_U32, \6898 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Sx_U32, \ 6874 6899 (uintptr_t)iemNativeHlpMemFetchDataU8_Sx_U32, pCallEntry->idxInstr) 6875 6900 6876 6901 #define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 6877 6902 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, a_iSeg, a_GCPtrMem, \ 6878 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Sx_U64, \6903 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Sx_U64, \ 6879 6904 (uintptr_t)iemNativeHlpMemFetchDataU8_Sx_U64, pCallEntry->idxInstr) 6880 6905 … … 6970 6995 #define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \ 6971 6996 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u8Dst, UINT8_MAX, a_GCPtrMem, \ 6972 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch, \6997 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch, \ 6973 6998 (uintptr_t)iemNativeHlpMemFlatFetchDataU8, pCallEntry->idxInstr) 6974 6999 6975 7000 #define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \ 6976 7001 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, UINT8_MAX, a_GCPtrMem, \ 6977 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Zx_U16, \7002 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Zx_U16, \ 6978 7003 (uintptr_t)iemNativeHlpMemFlatFetchDataU8, pCallEntry->idxInstr) 6979 7004 6980 7005 #define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \ 6981 7006 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, UINT8_MAX, a_GCPtrMem, \ 6982 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Zx_U32, \7007 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Zx_U32, \ 6983 7008 (uintptr_t)iemNativeHlpMemFlatFetchDataU8, pCallEntry->idxInstr) 6984 7009 6985 7010 #define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \ 6986 7011 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, UINT8_MAX, a_GCPtrMem, \ 6987 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Zx_U64, \7012 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Zx_U64, \ 6988 7013 (uintptr_t)iemNativeHlpMemFlatFetchDataU8, pCallEntry->idxInstr) 6989 7014 6990 7015 #define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \ 6991 7016 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u16Dst, UINT8_MAX, a_GCPtrMem, \ 6992 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Sx_U16, \7017 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Sx_U16, \ 6993 7018 (uintptr_t)iemNativeHlpMemFlatFetchDataU8_Sx_U16, pCallEntry->idxInstr) 6994 7019 6995 7020 #define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \ 6996 7021 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u32Dst, UINT8_MAX, a_GCPtrMem, \ 6997 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Sx_U32, \7022 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Sx_U32, \ 6998 7023 
(uintptr_t)iemNativeHlpMemFlatFetchDataU8_Sx_U32, pCallEntry->idxInstr) 6999 7024 7000 7025 #define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \ 7001 7026 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u64Dst, UINT8_MAX, a_GCPtrMem, \ 7002 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Fetch_Sx_U64, \7027 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Fetch_Sx_U64, \ 7003 7028 (uintptr_t)iemNativeHlpMemFlatFetchDataU8_Sx_U64, pCallEntry->idxInstr) 7004 7029 … … 7096 7121 7097 7122 #define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \ 7098 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u128Dst, a_iSeg, a_GCPtrMem, \ 7099 sizeof(RTUINT128U), sizeof(RTUINT128U) - 1, kIemNativeEmitMemOp_Fetch, \ 7123 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u128Dst, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \ 7124 (sizeof(RTUINT128U) - 1U) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE, \ 7125 kIemNativeEmitMemOp_Fetch, \ 7100 7126 (uintptr_t)iemNativeHlpMemFetchDataU128AlignedSse, pCallEntry->idxInstr) 7101 7127 7102 7128 AssertCompileSize(X86XMMREG, sizeof(RTUINT128U)); 7103 7129 #define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_uXmmDst, a_iSeg, a_GCPtrMem) \ 7104 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_uXmmDst, a_iSeg, a_GCPtrMem, \ 7105 sizeof(X86XMMREG), sizeof(X86XMMREG) - 1, kIemNativeEmitMemOp_Fetch, \ 7130 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_uXmmDst, a_iSeg, a_GCPtrMem, sizeof(X86XMMREG), \ 7131 (sizeof(X86XMMREG) - 1U) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE, \ 7132 kIemNativeEmitMemOp_Fetch, \ 7106 7133 (uintptr_t)iemNativeHlpMemFetchDataU128AlignedSse, pCallEntry->idxInstr) 7107 7134 … … 7118 7145 7119 7146 #define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \ 7120 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u128Dst, UINT8_MAX, a_GCPtrMem, \ 7121 sizeof(RTUINT128U), sizeof(RTUINT128U) - 1, kIemNativeEmitMemOp_Fetch, \ 7147 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u128Dst, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \ 7148 (sizeof(RTUINT128U) - 1U) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE, \ 7149 kIemNativeEmitMemOp_Fetch, \ 7122 7150 (uintptr_t)iemNativeHlpMemFlatFetchDataU128AlignedSse, pCallEntry->idxInstr) 7123 7151 7124 7152 #define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE(a_uXmmDst, a_GCPtrMem) \ 7125 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_uXmmDst, UINT8_MAX, a_GCPtrMem, \ 7126 sizeof(X86XMMREG), sizeof(X86XMMREG) - 1, kIemNativeEmitMemOp_Fetch, \ 7153 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_uXmmDst, UINT8_MAX, a_GCPtrMem, sizeof(X86XMMREG), \ 7154 (sizeof(X86XMMREG) - 1U) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE, \ 7155 kIemNativeEmitMemOp_Fetch, \ 7127 7156 (uintptr_t)iemNativeHlpMemFlatFetchDataU128AlignedSse, pCallEntry->idxInstr) 7128 7157 … … 7144 7173 7145 7174 #define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \ 7146 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u256Dst, a_iSeg, a_GCPtrMem, \7147 sizeof(RTUINT256U), sizeof(RTUINT256U) - 1, kIemNativeEmitMemOp_Fetch, \7175 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u256Dst, a_iSeg, a_GCPtrMem, sizeof(RTUINT256U), \ 7176 (sizeof(RTUINT256U) - 1U) | IEM_MEMMAP_F_ALIGN_GP, kIemNativeEmitMemOp_Fetch, \ 7148 7177 (uintptr_t)iemNativeHlpMemFetchDataU256AlignedAvx, pCallEntry->idxInstr) 7149 7178 … … 7161 7190 7162 7191 #define 
IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \ 7163 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u256Dst, UINT8_MAX, a_GCPtrMem, \7164 sizeof(RTUINT256U), sizeof(RTUINT256U) - 1, kIemNativeEmitMemOp_Fetch, \7192 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u256Dst, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT256U), \ 7193 (sizeof(RTUINT256U) - 1U) | IEM_MEMMAP_F_ALIGN_GP, kIemNativeEmitMemOp_Fetch, \ 7165 7194 (uintptr_t)iemNativeHlpMemFlatFetchDataU256AlignedAvx, pCallEntry->idxInstr) 7166 7195 #endif … … 7173 7202 #define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \ 7174 7203 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u8Value, a_iSeg, a_GCPtrMem, \ 7175 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Store, \7204 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Store, \ 7176 7205 (uintptr_t)iemNativeHlpMemStoreDataU8, pCallEntry->idxInstr) 7177 7206 … … 7194 7223 #define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \ 7195 7224 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u8Value, UINT8_MAX, a_GCPtrMem, \ 7196 sizeof(uint8_t), 0 /*fAlignMask */, kIemNativeEmitMemOp_Store, \7225 sizeof(uint8_t), 0 /*fAlignMaskAndCtl*/, kIemNativeEmitMemOp_Store, \ 7197 7226 (uintptr_t)iemNativeHlpMemFlatStoreDataU8, pCallEntry->idxInstr) 7198 7227 … … 7267 7296 #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR 7268 7297 # define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \ 7269 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u128Value, a_iSeg, a_GCPtrMem, \ 7270 sizeof(RTUINT128U), sizeof(RTUINT128U) - 1, kIemNativeEmitMemOp_Store, \ 7298 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u128Value, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \ 7299 (sizeof(RTUINT128U) - 1U) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE, \ 7300 kIemNativeEmitMemOp_Store, \ 7271 7301 (uintptr_t)iemNativeHlpMemStoreDataU128AlignedSse, pCallEntry->idxInstr) 7272 7302 … … 7282 7312 7283 7313 # define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \ 7284 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u256Value, a_iSeg, a_GCPtrMem, \7285 sizeof(RTUINT256U), sizeof(RTUINT256U) - 1, kIemNativeEmitMemOp_Store, \7314 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u256Value, a_iSeg, a_GCPtrMem, sizeof(RTUINT256U), \ 7315 (sizeof(RTUINT256U) - 1U) | IEM_MEMMAP_F_ALIGN_GP, kIemNativeEmitMemOp_Store, \ 7286 7316 (uintptr_t)iemNativeHlpMemStoreDataU256AlignedAvx, pCallEntry->idxInstr) 7287 7317 7288 7318 7289 7319 # define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \ 7290 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u128Value, UINT8_MAX, a_GCPtrMem, \ 7291 sizeof(RTUINT128U), sizeof(RTUINT128U) - 1, kIemNativeEmitMemOp_Store, \ 7320 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u128Value, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \ 7321 (sizeof(RTUINT128U) - 1U) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE, \ 7322 kIemNativeEmitMemOp_Store, \ 7292 7323 (uintptr_t)iemNativeHlpMemFlatStoreDataU128AlignedSse, pCallEntry->idxInstr) 7293 7324 … … 7303 7334 7304 7335 # define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \ 7305 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u256Value, UINT8_MAX, a_GCPtrMem, \7306 sizeof(RTUINT256U), sizeof(RTUINT256U) - 1, kIemNativeEmitMemOp_Store, \7336 off = iemNativeEmitMemFetchStoreDataCommon(pReNative, off, a_u256Value, UINT8_MAX, 
a_GCPtrMem, sizeof(RTUINT256U), \ 7337 (sizeof(RTUINT256U) - 1U) | IEM_MEMMAP_F_ALIGN_GP, kIemNativeEmitMemOp_Store, \ 7307 7338 (uintptr_t)iemNativeHlpMemFlatStoreDataU256AlignedAvx, pCallEntry->idxInstr) 7308 7339 #endif … … 8023 8054 #define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8024 8055 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \ 8025 IEM_ACCESS_DATA_ATOMIC, 0 /*fAlignMask */, \8056 IEM_ACCESS_DATA_ATOMIC, 0 /*fAlignMaskAndCtl*/, \ 8026 8057 (uintptr_t)iemNativeHlpMemMapDataU8Atomic, pCallEntry->idxInstr) 8027 8058 8028 8059 #define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8029 8060 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \ 8030 IEM_ACCESS_DATA_RW, 0 /*fAlignMask */, \8061 IEM_ACCESS_DATA_RW, 0 /*fAlignMaskAndCtl*/, \ 8031 8062 (uintptr_t)iemNativeHlpMemMapDataU8Rw, pCallEntry->idxInstr) 8032 8063 8033 8064 #define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8034 8065 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \ 8035 IEM_ACCESS_DATA_W, 0 /*fAlignMask */, \8066 IEM_ACCESS_DATA_W, 0 /*fAlignMaskAndCtl*/, \ 8036 8067 (uintptr_t)iemNativeHlpMemMapDataU8Wo, pCallEntry->idxInstr) \ 8037 8068 8038 8069 #define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8039 8070 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint8_t), \ 8040 IEM_ACCESS_DATA_R, 0 /*fAlignMask */, \8071 IEM_ACCESS_DATA_R, 0 /*fAlignMaskAndCtl*/, \ 8041 8072 (uintptr_t)iemNativeHlpMemMapDataU8Ro, pCallEntry->idxInstr) 8042 8073 … … 8044 8075 #define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8045 8076 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \ 8046 IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1 /*fAlignMask */, \8077 IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1 /*fAlignMaskAndCtl*/, \ 8047 8078 (uintptr_t)iemNativeHlpMemMapDataU16Atomic, pCallEntry->idxInstr) 8048 8079 8049 8080 #define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8050 8081 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \ 8051 IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1 /*fAlignMask */, \8082 IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1 /*fAlignMaskAndCtl*/, \ 8052 8083 (uintptr_t)iemNativeHlpMemMapDataU16Rw, pCallEntry->idxInstr) 8053 8084 8054 8085 #define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8055 8086 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \ 8056 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask */, \8087 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMaskAndCtl*/, \ 8057 8088 (uintptr_t)iemNativeHlpMemMapDataU16Wo, pCallEntry->idxInstr) \ 8058 8089 8059 8090 #define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8060 8091 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint16_t), \ 8061 IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1 /*fAlignMask */, \8092 IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1 /*fAlignMaskAndCtl*/, \ 8062 8093 (uintptr_t)iemNativeHlpMemMapDataU16Ro, pCallEntry->idxInstr) 8063 8094 8064 8095 #define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, 
a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8065 8096 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(int16_t), \ 8066 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask */, \8097 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMaskAndCtl*/, \ 8067 8098 (uintptr_t)iemNativeHlpMemMapDataU16Wo, pCallEntry->idxInstr) \ 8068 8099 … … 8070 8101 #define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8071 8102 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \ 8072 IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1 /*fAlignMask */, \8103 IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1 /*fAlignMaskAndCtl*/, \ 8073 8104 (uintptr_t)iemNativeHlpMemMapDataU32Atomic, pCallEntry->idxInstr) 8074 8105 8075 8106 #define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8076 8107 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \ 8077 IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1 /*fAlignMask */, \8108 IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1 /*fAlignMaskAndCtl*/, \ 8078 8109 (uintptr_t)iemNativeHlpMemMapDataU32Rw, pCallEntry->idxInstr) 8079 8110 8080 8111 #define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8081 8112 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \ 8082 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask */, \8113 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMaskAndCtl*/, \ 8083 8114 (uintptr_t)iemNativeHlpMemMapDataU32Wo, pCallEntry->idxInstr) \ 8084 8115 8085 8116 #define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8086 8117 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint32_t), \ 8087 IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1 /*fAlignMask */, \8118 IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1 /*fAlignMaskAndCtl*/, \ 8088 8119 (uintptr_t)iemNativeHlpMemMapDataU32Ro, pCallEntry->idxInstr) 8089 8120 8090 8121 #define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8091 8122 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(int32_t), \ 8092 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask */, \8123 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMaskAndCtl*/, \ 8093 8124 (uintptr_t)iemNativeHlpMemMapDataU32Wo, pCallEntry->idxInstr) \ 8094 8125 … … 8096 8127 #define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8097 8128 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \ 8098 IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1 /*fAlignMask */, \8129 IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8099 8130 (uintptr_t)iemNativeHlpMemMapDataU64Atomic, pCallEntry->idxInstr) 8100 8131 8101 8132 #define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8102 8133 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \ 8103 IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1 /*fAlignMask */, \8134 IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8104 8135 (uintptr_t)iemNativeHlpMemMapDataU64Rw, pCallEntry->idxInstr) 8105 8136 #define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8106 8137 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, 
a_iSeg, a_GCPtrMem, sizeof(uint64_t), \ 8107 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask */, \8138 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8108 8139 (uintptr_t)iemNativeHlpMemMapDataU64Wo, pCallEntry->idxInstr) \ 8109 8140 8110 8141 #define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8111 8142 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(uint64_t), \ 8112 IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1 /*fAlignMask */, \8143 IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8113 8144 (uintptr_t)iemNativeHlpMemMapDataU64Ro, pCallEntry->idxInstr) 8114 8145 8115 8146 #define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8116 8147 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(int64_t), \ 8117 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask */, \8148 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8118 8149 (uintptr_t)iemNativeHlpMemMapDataU64Wo, pCallEntry->idxInstr) \ 8119 8150 … … 8121 8152 #define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8122 8153 off = iemNativeEmitMemMapCommon(pReNative, off, a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTFLOAT80U), \ 8123 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask */, \8154 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8124 8155 (uintptr_t)iemNativeHlpMemMapDataR80Wo, pCallEntry->idxInstr) \ 8125 8156 8126 8157 #define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8127 8158 off = iemNativeEmitMemMapCommon(pReNative, off, a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTFLOAT80U), \ 8128 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask */, /** @todo check BCD align */ \8159 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, /** @todo check BCD align */ \ 8129 8160 (uintptr_t)iemNativeHlpMemMapDataD80Wo, pCallEntry->idxInstr) \ 8130 8161 … … 8132 8163 #define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8133 8164 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \ 8134 IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1 /*fAlignMask */, \8165 IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1 /*fAlignMaskAndCtl*/, \ 8135 8166 (uintptr_t)iemNativeHlpMemMapDataU128Atomic, pCallEntry->idxInstr) 8136 8167 8137 8168 #define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8138 8169 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \ 8139 IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1 /*fAlignMask */, \8170 IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1 /*fAlignMaskAndCtl*/, \ 8140 8171 (uintptr_t)iemNativeHlpMemMapDataU128Rw, pCallEntry->idxInstr) 8141 8172 8142 8173 #define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8143 8174 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, sizeof(RTUINT128U), \ 8144 IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1 /*fAlignMask */, \8175 IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1 /*fAlignMaskAndCtl*/, \ 8145 8176 (uintptr_t)iemNativeHlpMemMapDataU128Wo, pCallEntry->idxInstr) \ 8146 8177 8147 8178 #define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 8148 8179 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem, 
sizeof(RTUINT128U), \ 8149 IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1 /*fAlignMask */, \8180 IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1 /*fAlignMaskAndCtl*/, \ 8150 8181 (uintptr_t)iemNativeHlpMemMapDataU128Ro, pCallEntry->idxInstr) 8151 8182 … … 8154 8185 #define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 8155 8186 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \ 8156 IEM_ACCESS_DATA_ATOMIC, 0 /*fAlignMask */, \8187 IEM_ACCESS_DATA_ATOMIC, 0 /*fAlignMaskAndCtl*/, \ 8157 8188 (uintptr_t)iemNativeHlpMemFlatMapDataU8Atomic, pCallEntry->idxInstr) 8158 8189 8159 8190 #define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 8160 8191 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \ 8161 IEM_ACCESS_DATA_RW, 0 /*fAlignMask */, \8192 IEM_ACCESS_DATA_RW, 0 /*fAlignMaskAndCtl*/, \ 8162 8193 (uintptr_t)iemNativeHlpMemFlatMapDataU8Rw, pCallEntry->idxInstr) 8163 8194 8164 8195 #define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 8165 8196 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \ 8166 IEM_ACCESS_DATA_W, 0 /*fAlignMask */, \8197 IEM_ACCESS_DATA_W, 0 /*fAlignMaskAndCtl*/, \ 8167 8198 (uintptr_t)iemNativeHlpMemFlatMapDataU8Wo, pCallEntry->idxInstr) \ 8168 8199 8169 8200 #define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 8170 8201 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu8Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint8_t), \ 8171 IEM_ACCESS_DATA_R, 0 /*fAlignMask */, \8202 IEM_ACCESS_DATA_R, 0 /*fAlignMaskAndCtl*/, \ 8172 8203 (uintptr_t)iemNativeHlpMemFlatMapDataU8Ro, pCallEntry->idxInstr) 8173 8204 … … 8175 8206 #define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 8176 8207 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \ 8177 IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1 /*fAlignMask */, \8208 IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1 /*fAlignMaskAndCtl*/, \ 8178 8209 (uintptr_t)iemNativeHlpMemFlatMapDataU16Atomic, pCallEntry->idxInstr) 8179 8210 8180 8211 #define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 8181 8212 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \ 8182 IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1 /*fAlignMask */, \8213 IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1 /*fAlignMaskAndCtl*/, \ 8183 8214 (uintptr_t)iemNativeHlpMemFlatMapDataU16Rw, pCallEntry->idxInstr) 8184 8215 8185 8216 #define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 8186 8217 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \ 8187 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask */, \8218 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMaskAndCtl*/, \ 8188 8219 (uintptr_t)iemNativeHlpMemFlatMapDataU16Wo, pCallEntry->idxInstr) \ 8189 8220 8190 8221 #define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 8191 8222 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint16_t), \ 8192 IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1 /*fAlignMask */, \8223 IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1 /*fAlignMaskAndCtl*/, \ 8193 8224 (uintptr_t)iemNativeHlpMemFlatMapDataU16Ro, pCallEntry->idxInstr) 8194 8225 8195 8226 
#define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \ 8196 8227 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi16Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(int16_t), \ 8197 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMask */, \8228 IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1 /*fAlignMaskAndCtl*/, \ 8198 8229 (uintptr_t)iemNativeHlpMemFlatMapDataU16Wo, pCallEntry->idxInstr) \ 8199 8230 … … 8201 8232 #define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 8202 8233 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \ 8203 IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1 /*fAlignMask */, \8234 IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1 /*fAlignMaskAndCtl*/, \ 8204 8235 (uintptr_t)iemNativeHlpMemFlatMapDataU32Atomic, pCallEntry->idxInstr) 8205 8236 8206 8237 #define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 8207 8238 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \ 8208 IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1 /*fAlignMask */, \8239 IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1 /*fAlignMaskAndCtl*/, \ 8209 8240 (uintptr_t)iemNativeHlpMemFlatMapDataU32Rw, pCallEntry->idxInstr) 8210 8241 8211 8242 #define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 8212 8243 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \ 8213 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask */, \8244 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMaskAndCtl*/, \ 8214 8245 (uintptr_t)iemNativeHlpMemFlatMapDataU32Wo, pCallEntry->idxInstr) \ 8215 8246 8216 8247 #define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 8217 8248 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint32_t), \ 8218 IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1 /*fAlignMask */, \8249 IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1 /*fAlignMaskAndCtl*/, \ 8219 8250 (uintptr_t)iemNativeHlpMemFlatMapDataU32Ro, pCallEntry->idxInstr) 8220 8251 8221 8252 #define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \ 8222 8253 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi32Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(int32_t), \ 8223 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMask */, \8254 IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1 /*fAlignMaskAndCtl*/, \ 8224 8255 (uintptr_t)iemNativeHlpMemFlatMapDataU32Wo, pCallEntry->idxInstr) \ 8225 8256 … … 8227 8258 #define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 8228 8259 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \ 8229 IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1 /*fAlignMask */, \8260 IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8230 8261 (uintptr_t)iemNativeHlpMemFlatMapDataU64Atomic, pCallEntry->idxInstr) 8231 8262 8232 8263 #define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 8233 8264 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \ 8234 IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1 /*fAlignMask */, \8265 IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8235 8266 (uintptr_t)iemNativeHlpMemFlatMapDataU64Rw, pCallEntry->idxInstr) 8236 8267 8237 8268 #define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 8238 
8269 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \ 8239 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask */, \8270 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8240 8271 (uintptr_t)iemNativeHlpMemFlatMapDataU64Wo, pCallEntry->idxInstr) \ 8241 8272 8242 8273 #define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 8243 8274 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(uint64_t), \ 8244 IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1 /*fAlignMask */, \8275 IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8245 8276 (uintptr_t)iemNativeHlpMemFlatMapDataU64Ro, pCallEntry->idxInstr) 8246 8277 8247 8278 #define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \ 8248 8279 off = iemNativeEmitMemMapCommon(pReNative, off, a_pi64Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(int64_t), \ 8249 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask */, \8280 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8250 8281 (uintptr_t)iemNativeHlpMemFlatMapDataU64Wo, pCallEntry->idxInstr) \ 8251 8282 … … 8253 8284 #define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \ 8254 8285 off = iemNativeEmitMemMapCommon(pReNative, off, a_pr80Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTFLOAT80U), \ 8255 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask */, \8286 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, \ 8256 8287 (uintptr_t)iemNativeHlpMemFlatMapDataR80Wo, pCallEntry->idxInstr) \ 8257 8288 8258 8289 #define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \ 8259 8290 off = iemNativeEmitMemMapCommon(pReNative, off, a_pd80Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTFLOAT80U), \ 8260 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMask */, /** @todo check BCD align */ \8291 IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1 /*fAlignMaskAndCtl*/, /** @todo check BCD align */ \ 8261 8292 (uintptr_t)iemNativeHlpMemFlatMapDataD80Wo, pCallEntry->idxInstr) \ 8262 8293 … … 8264 8295 #define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 8265 8296 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \ 8266 IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1 /*fAlignMask */, \8297 IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1 /*fAlignMaskAndCtl*/, \ 8267 8298 (uintptr_t)iemNativeHlpMemFlatMapDataU128Atomic, pCallEntry->idxInstr) 8268 8299 8269 8300 #define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 8270 8301 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \ 8271 IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1 /*fAlignMask */, \8302 IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1 /*fAlignMaskAndCtl*/, \ 8272 8303 (uintptr_t)iemNativeHlpMemFlatMapDataU128Rw, pCallEntry->idxInstr) 8273 8304 8274 8305 #define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 8275 8306 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \ 8276 IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1 /*fAlignMask */, \8307 IEM_ACCESS_DATA_W, sizeof(RTUINT128U) - 1 /*fAlignMaskAndCtl*/, \ 8277 8308 (uintptr_t)iemNativeHlpMemFlatMapDataU128Wo, pCallEntry->idxInstr) \ 8278 8309 8279 8310 #define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 8280 
8311 off = iemNativeEmitMemMapCommon(pReNative, off, a_pu128Mem, a_bUnmapInfo, UINT8_MAX, a_GCPtrMem, sizeof(RTUINT128U), \ 8281 IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1 /*fAlignMask */, \8312 IEM_ACCESS_DATA_R, sizeof(RTUINT128U) - 1 /*fAlignMaskAndCtl*/, \ 8282 8313 (uintptr_t)iemNativeHlpMemFlatMapDataU128Ro, pCallEntry->idxInstr) 8283 8314 … … 8285 8316 DECL_INLINE_THROW(uint32_t) 8286 8317 iemNativeEmitMemMapCommon(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarMem, uint8_t idxVarUnmapInfo, 8287 uint8_t iSegReg, uint8_t idxVarGCPtrMem, uint8_t cbMem, uint32_t fAccess, uint 8_t fAlignMask,8318 uint8_t iSegReg, uint8_t idxVarGCPtrMem, uint8_t cbMem, uint32_t fAccess, uint32_t fAlignMaskAndCtl, 8288 8319 uintptr_t pfnFunction, uint8_t idxInstr) 8289 8320 { … … 8331 8362 switch (cbMem) 8332 8363 { 8333 case 1: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU8)); break; 8334 case 2: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU16)); break; 8335 case 4: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU32)); break; 8336 case 8: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU64)); break; 8364 case 1: 8365 Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU8)); 8366 Assert(!fAlignMaskAndCtl); 8367 break; 8368 case 2: 8369 Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU16)); 8370 Assert(fAlignMaskAndCtl < 2); 8371 break; 8372 case 4: 8373 Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU32)); 8374 Assert(fAlignMaskAndCtl < 4); 8375 break; 8376 case 8: 8377 Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU64)); 8378 Assert(fAlignMaskAndCtl < 8); 8379 break; 8337 8380 case 10: 8338 8381 Assert( pfnFunction == (uintptr_t)iemNativeHlpMemFlatMapDataR80Wo 8339 8382 || pfnFunction == (uintptr_t)iemNativeHlpMemFlatMapDataD80Wo); 8340 8383 Assert((fAccess & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE); 8384 Assert(fAlignMaskAndCtl < 8); 8341 8385 break; 8342 case 16: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU128)); break; 8386 case 16: 8387 Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemFlatMapDataU128)); 8388 Assert(fAlignMaskAndCtl < 16); 8389 break; 8343 8390 # if 0 8344 case 32: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemFlatMapDataU256)); break; 8345 case 64: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemFlatMapDataU512)); break; 8391 case 32: 8392 Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemFlatMapDataU256)); 8393 Assert(fAlignMaskAndCtl < 32); 8394 break; 8395 case 64: 8396 Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemFlatMapDataU512)); 8397 Assert(fAlignMaskAndCtl < 64); 8398 break; 8346 8399 # endif 8347 8400 default: AssertFailed(); break; … … 8353 8406 switch (cbMem) 8354 8407 { 8355 case 1: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU8)); break; 8356 case 2: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU16)); break; 8357 case 4: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU32)); break; 8358 case 8: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU64)); break; 8408 case 1: 8409 Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU8)); 8410 Assert(!fAlignMaskAndCtl); 8411 break; 8412 case 2: 8413 Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, 
iemNativeHlpMemMapDataU16)); 8414 Assert(fAlignMaskAndCtl < 2); 8415 break; 8416 case 4: 8417 Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU32)); 8418 Assert(fAlignMaskAndCtl < 4); 8419 break; 8420 case 8: 8421 Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU64)); 8422 Assert(fAlignMaskAndCtl < 8); 8423 break; 8359 8424 case 10: 8360 8425 Assert( pfnFunction == (uintptr_t)iemNativeHlpMemMapDataR80Wo 8361 8426 || pfnFunction == (uintptr_t)iemNativeHlpMemMapDataD80Wo); 8362 8427 Assert((fAccess & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE); 8428 Assert(fAlignMaskAndCtl < 8); 8363 8429 break; 8364 case 16: Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU128)); break; 8430 case 16: 8431 Assert(pfnFunction == IEM_MAP_HLP_FN(fAccess, iemNativeHlpMemMapDataU128)); 8432 Assert(fAlignMaskAndCtl < 16); 8433 break; 8365 8434 # if 0 8366 case 32: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemMapDataU256)); break; 8367 case 64: Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemMapDataU512)); break; 8435 case 32: 8436 Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemMapDataU256)); 8437 Assert(fAlignMaskAndCtl < 32); 8438 break; 8439 case 64: 8440 Assert(pfnFunction == IEM_MAP_HLP_FN_NO_AT(fAccess, iemNativeHlpMemMapDataU512)); 8441 Assert(fAlignMaskAndCtl < 64); 8442 break; 8368 8443 # endif 8369 8444 default: AssertFailed(); break; … … 8495 8570 * TlbLookup: 8496 8571 */ 8497 off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMask , fAccess,8572 off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMaskAndCtl, fAccess, 8498 8573 idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult); 8499 8574 # ifdef IEM_WITH_TLB_STATISTICS … … 8518 8593 } 8519 8594 #else 8520 RT_NOREF(fAccess, fAlignMask , idxLabelTlbMiss);8595 RT_NOREF(fAccess, fAlignMaskAndCtl, idxLabelTlbMiss); 8521 8596 #endif 8522 8597 -
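The widening of fAlignMask (uint8_t) to fAlignMaskAndCtl (uint32_t) lets callers pass both a byte-sized alignment mask and control flags such as IEM_MEMMAP_F_ALIGN_GP and IEM_MEMMAP_F_ALIGN_SSE in a single argument, as the new assertions above check. A sketch of how a consumer might decompose the combined value (illustrative only; the flag semantics in the comments are inferred from the names, not from the changeset):

    uint8_t const fAlignMask = (uint8_t)fAlignMaskAndCtl;                          /* low byte: alignment mask, e.g. 15 for 16-byte alignment */
    bool const    fAlignGp   = RT_BOOL(fAlignMaskAndCtl & IEM_MEMMAP_F_ALIGN_GP);  /* presumably: misalignment raises #GP rather than #AC */
    bool const    fAlignSse  = RT_BOOL(fAlignMaskAndCtl & IEM_MEMMAP_F_ALIGN_SSE); /* presumably: SSE-style alignment handling applies */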
trunk/src/VBox/VMM/include/IEMInline.h
r104424 → r104984

 /**
+ * Calculates the IEM_F_X86_AC flags.
+ *
+ * @returns IEM_F_X86_AC or zero
+ * @param   pVCpu   The cross context virtual CPU structure of the
+ *                  calling thread.
+ */
+DECL_FORCE_INLINE(uint32_t) iemCalcExecAcFlag(PVMCPUCC pVCpu) RT_NOEXCEPT
+{
+    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
+
+    if (   !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
+        || (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_AM | X86_CR0_PE)) != (X86_CR0_AM | X86_CR0_PE)
+        || (   !pVCpu->cpum.GstCtx.eflags.Bits.u1VM
+            && pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl != 3))
+        return 0;
+    return IEM_F_X86_AC;
+}
+
+
+/**
  * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag.
…
 /**
- * Calculates the IEM_F_MODE_XXX and CPL flags.
- *
- * @returns IEM_F_MODE_XXX
+ * Calculates the IEM_F_MODE_XXX, CPL and AC flags.
+ *
+ * @returns IEM_F_MODE_XXX, IEM_F_X86_CPL_MASK and IEM_F_X86_AC.
  * @param   pVCpu   The cross context virtual CPU structure of the
  *                  calling thread.
…
         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
         uint32_t fExec = ((uint32_t)pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl << IEM_F_X86_CPL_SHIFT);
+        if (   !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
+            || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
+            || fExec != (3U << IEM_F_X86_CPL_SHIFT))
+        { /* likely */ }
+        else
+            fExec |= IEM_F_X86_AC;
+
         if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
         {
…
             return fExec;
         }
-    return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT);
+    if (   !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
+        || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM))
+        return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT);
+    return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT) | IEM_F_X86_AC;
…
  *          calling thread.
  */
-DECL_FORCE_INLINE(void) iemRecalcExecModeAndCplFlags(PVMCPUCC pVCpu)
-{
-    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
+DECL_FORCE_INLINE(void) iemRecalcExecModeAndCplAndAcFlags(PVMCPUCC pVCpu)
+{
+    pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
                        | iemCalcExecModeAndCplFlags(pVCpu);
 }
…
 
-/* Every template reyling on unaligned accesses inside a page not being okay should go below. */
+/* Every template relying on unaligned accesses inside a page not being okay should go below. */
 #undef  TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
 #define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0
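For reference, the predicate implemented by the new iemCalcExecAcFlag above can be restated as a standalone boolean expression (a sketch for clarity, not VBox code; the function name is invented):

    /* Alignment-check (#AC) enforcement is active only when all of these hold;
       otherwise iemCalcExecAcFlag() returns 0 and IEM_F_X86_AC stays clear. */
    static bool iemSketchIsAcActive(bool fEflAc, bool fEflVm, uint64_t cr0, unsigned uCpl)
    {
        return fEflAc                                                             /* EFLAGS.AC set              */
            && (cr0 & (X86_CR0_AM | X86_CR0_PE)) == (X86_CR0_AM | X86_CR0_PE)     /* CR0.AM and CR0.PE both set */
            && (fEflVm || uCpl == 3);                                             /* virtual-8086 mode or ring 3 */
    }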
trunk/src/VBox/VMM/include/IEMInternal.h
r104956 → r104984

 /** X86: The current protection level (CPL) shifted mask. */
 #define IEM_F_X86_CPL_SMASK             UINT32_C(0x00000003)
 
+/** X86: Alignment checks enabled (CR0.AM=1 & EFLAGS.AC=1). */
+#define IEM_F_X86_AC                    UINT32_C(0x00080000)
 
 /** X86 execution context.
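With the flag cached in fExec, hot paths can test it without re-reading CR0 and EFLAGS. A minimal sketch of such a consumer (illustrative only; the helper name and its use are invented for the example):

    /* Decide whether a data access at GCPtrMem must raise #AC: the cached
       IEM_F_X86_AC bit says alignment checking is armed, the mask defines what
       "aligned" means for this access size (e.g. 3 for a 4-byte access). */
    static bool iemSketchMustRaiseAc(uint32_t fExec, RTGCPTR GCPtrMem, uint8_t fAlignMask)
    {
        return (fExec & IEM_F_X86_AC)
            && (GCPtrMem & fAlignMask) != 0;
    }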
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h
r104798 r104984 632 632 } 633 633 634 635 634 /** 636 635 * Emits a 32-bit GPR load of a VCpu value. … … 638 637 */ 639 638 DECL_INLINE_THROW(uint32_t) 639 iemNativeEmitLoadGprFromVCpuU32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 640 { 641 #ifdef RT_ARCH_AMD64 642 /* mov reg32, mem32 */ 643 if (iGpr >= 8) 644 pCodeBuf[off++] = X86_OP_REX_R; 645 pCodeBuf[off++] = 0x8b; 646 off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, iGpr, offVCpu); 647 648 #elif defined(RT_ARCH_ARM64) 649 off = iemNativeEmitGprByVCpuLdStEx(pCodeBuf, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t)); 650 651 #else 652 # error "port me" 653 #endif 654 return off; 655 } 656 657 658 /** 659 * Emits a 32-bit GPR load of a VCpu value. 660 * @note Bits 32 thru 63 in the GPR will be zero after the operation. 661 */ 662 DECL_INLINE_THROW(uint32_t) 640 663 iemNativeEmitLoadGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu) 641 664 { 642 665 #ifdef RT_ARCH_AMD64 643 /* mov reg32, mem32 */ 644 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7); 645 if (iGpr >= 8) 646 pbCodeBuf[off++] = X86_OP_REX_R; 647 pbCodeBuf[off++] = 0x8b; 648 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu); 666 off = iemNativeEmitLoadGprFromVCpuU32Ex(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGpr, offVCpu); 649 667 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off); 650 668 … … 6952 6970 */ 6953 6971 DECL_INLINE_THROW(uint32_t) 6954 iemNativeEmitTestBitInGprAndJmpTo LabelIfCc(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc,6955 uint8_t iBitNo, uint32_t idxLabel, bool fJmpIfSet)6972 iemNativeEmitTestBitInGprAndJmpToFixedIfCcEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprSrc, uint8_t iBitNo, 6973 uint32_t offTarget, uint32_t *poffFixup, bool fJmpIfSet) 6956 6974 { 6957 6975 Assert(iBitNo < 64); 6958 6976 #ifdef RT_ARCH_AMD64 6959 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);6960 6977 if (iBitNo < 8) 6961 6978 { 6962 6979 /* test Eb, imm8 */ 6963 6980 if (iGprSrc >= 4) 6964 pbCodeBuf[off++] = iGprSrc >= 8 ? X86_OP_REX_B : X86_OP_REX; 6965 pbCodeBuf[off++] = 0xf6; 6966 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7); 6967 pbCodeBuf[off++] = (uint8_t)1 << iBitNo; 6968 off = iemNativeEmitJccToLabel(pReNative, off, idxLabel, fJmpIfSet ? kIemNativeInstrCond_ne : kIemNativeInstrCond_e); 6981 pCodeBuf[off++] = iGprSrc >= 8 ? X86_OP_REX_B : X86_OP_REX; 6982 pCodeBuf[off++] = 0xf6; 6983 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7); 6984 pCodeBuf[off++] = (uint8_t)1 << iBitNo; 6985 if (poffFixup) 6986 *poffFixup = off; 6987 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, offTarget, fJmpIfSet ? kIemNativeInstrCond_ne : kIemNativeInstrCond_e); 6969 6988 } 6970 6989 else … … 6972 6991 /* bt Ev, imm8 */ 6973 6992 if (iBitNo >= 32) 6974 p bCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_B);6993 pCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_B); 6975 6994 else if (iGprSrc >= 8) 6976 pbCodeBuf[off++] = X86_OP_REX_B; 6977 pbCodeBuf[off++] = 0x0f; 6978 pbCodeBuf[off++] = 0xba; 6979 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprSrc & 7); 6980 pbCodeBuf[off++] = iBitNo; 6981 off = iemNativeEmitJccToLabel(pReNative, off, idxLabel, fJmpIfSet ? 
kIemNativeInstrCond_c : kIemNativeInstrCond_nc); 6995 pCodeBuf[off++] = X86_OP_REX_B; 6996 pCodeBuf[off++] = 0x0f; 6997 pCodeBuf[off++] = 0xba; 6998 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprSrc & 7); 6999 pCodeBuf[off++] = iBitNo; 7000 if (poffFixup) 7001 *poffFixup = off; 7002 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, offTarget, fJmpIfSet ? kIemNativeInstrCond_c : kIemNativeInstrCond_nc); 7003 } 7004 7005 #elif defined(RT_ARCH_ARM64) 7006 /* Just use the TBNZ instruction here. */ 7007 if (poffFixup) 7008 *poffFixup = off; 7009 pCodeBuf[off++] = Armv8A64MkInstrTbzTbnz(fJmpIfSet, off - offTarget, iGprSrc, iBitNo); 7010 7011 #else 7012 # error "Port me!" 7013 #endif 7014 return off; 7015 } 7016 7017 7018 /** 7019 * Emits a jump to @a idxTarget on the condition that bit @a iBitNo _is_ _set_ 7020 * in @a iGprSrc. 7021 */ 7022 DECL_INLINE_THROW(uint32_t) 7023 iemNativeEmitTestBitInGprAndJmpToFixedIfSetEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprSrc, uint8_t iBitNo, 7024 uint32_t offTarget, uint32_t *poffFixup) 7025 { 7026 return iemNativeEmitTestBitInGprAndJmpToFixedIfCcEx(pCodeBuf, off, iGprSrc, iBitNo, offTarget, poffFixup, true /*fJmpIfSet*/); 7027 } 7028 7029 7030 /** 7031 * Emits a jump to @a idxTarget on the condition that bit @a iBitNo _is_ _not_ 7032 * _set_ in @a iGprSrc. 7033 */ 7034 DECL_INLINE_THROW(uint32_t) 7035 iemNativeEmitTestBitInGprAndJmpToLabelIfNotSetEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprSrc, uint8_t iBitNo, 7036 uint32_t offTarget, uint32_t *poffFixup) 7037 { 7038 return iemNativeEmitTestBitInGprAndJmpToFixedIfCcEx(pCodeBuf, off, iGprSrc, iBitNo, offTarget, poffFixup, false /*fJmpIfSet*/); 7039 } 7040 7041 7042 7043 /** 7044 * Internal helper, don't call directly. 7045 */ 7046 DECL_INLINE_THROW(uint32_t) 7047 iemNativeEmitTestBitInGprAndJmpToLabelIfCcEx(PIEMRECOMPILERSTATE pReNative, PIEMNATIVEINSTR pCodeBuf, uint32_t off, 7048 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel, bool fJmpIfSet) 7049 { 7050 Assert(iBitNo < 64); 7051 #ifdef RT_ARCH_AMD64 7052 if (iBitNo < 8) 7053 { 7054 /* test Eb, imm8 */ 7055 if (iGprSrc >= 4) 7056 pCodeBuf[off++] = iGprSrc >= 8 ? X86_OP_REX_B : X86_OP_REX; 7057 pCodeBuf[off++] = 0xf6; 7058 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7); 7059 pCodeBuf[off++] = (uint8_t)1 << iBitNo; 7060 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabel, 7061 fJmpIfSet ? kIemNativeInstrCond_ne : kIemNativeInstrCond_e); 7062 } 7063 else 7064 { 7065 /* bt Ev, imm8 */ 7066 if (iBitNo >= 32) 7067 pCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_B); 7068 else if (iGprSrc >= 8) 7069 pCodeBuf[off++] = X86_OP_REX_B; 7070 pCodeBuf[off++] = 0x0f; 7071 pCodeBuf[off++] = 0xba; 7072 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprSrc & 7); 7073 pCodeBuf[off++] = iBitNo; 7074 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabel, 7075 fJmpIfSet ? kIemNativeInstrCond_c : kIemNativeInstrCond_nc); 6982 7076 } 6983 7077 6984 7078 #elif defined(RT_ARCH_ARM64) 6985 7079 /* Use the TBNZ instruction here. 
*/ 6986 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);6987 7080 if (pReNative->paLabels[idxLabel].enmType > kIemNativeLabelType_LastWholeTbBranch) 6988 7081 { … … 6993 7086 { 6994 7087 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm14At5); 6995 p u32CodeBuf[off++] = Armv8A64MkInstrTbzTbnz(fJmpIfSet, 0, iGprSrc, iBitNo);7088 pCodeBuf[off++] = Armv8A64MkInstrTbzTbnz(fJmpIfSet, 0, iGprSrc, iBitNo); 6996 7089 } 6997 7090 //else … … 6999 7092 // RT_BREAKPOINT(); 7000 7093 // Assert(off - offLabel <= 0x1fffU); 7001 // p u32CodeBuf[off++] = Armv8A64MkInstrTbzTbnz(fJmpIfSet, offLabel - off, iGprSrc, iBitNo);7094 // pCodeBuf[off++] = Armv8A64MkInstrTbzTbnz(fJmpIfSet, offLabel - off, iGprSrc, iBitNo); 7002 7095 // 7003 7096 //} … … 7006 7099 { 7007 7100 Assert(Armv8A64ConvertImmRImmS2Mask64(0x40, (64U - iBitNo) & 63U) == RT_BIT_64(iBitNo)); 7008 p u32CodeBuf[off++] = Armv8A64MkInstrTstImm(iGprSrc, 0x40, (64U - iBitNo) & 63U);7101 pCodeBuf[off++] = Armv8A64MkInstrTstImm(iGprSrc, 0x40, (64U - iBitNo) & 63U); 7009 7102 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5); 7010 p u32CodeBuf[off++] = Armv8A64MkInstrBCond(fJmpIfSet ? kArmv8InstrCond_Ne : kArmv8InstrCond_Eq, 0);7103 pCodeBuf[off++] = Armv8A64MkInstrBCond(fJmpIfSet ? kArmv8InstrCond_Ne : kArmv8InstrCond_Eq, 0); 7011 7104 } 7012 7105 … … 7014 7107 # error "Port me!" 7015 7108 #endif 7016 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);7017 7109 return off; 7018 7110 } … … 7022 7114 * Emits a jump to @a idxLabel on the condition that bit @a iBitNo _is_ _set_ in 7023 7115 * @a iGprSrc. 7024 * 7025 * @note On ARM64 the range is only +/-8191 instructions. 7116 */ 7117 DECL_INLINE_THROW(uint32_t) 7118 iemNativeEmitTestBitInGprAndJmpToLabelIfSetEx(PIEMRECOMPILERSTATE pReNative, PIEMNATIVEINSTR pCodeBuf, uint32_t off, 7119 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel) 7120 { 7121 return iemNativeEmitTestBitInGprAndJmpToLabelIfCcEx(pReNative, pCodeBuf, off, iGprSrc, iBitNo, idxLabel, true /*fJmpIfSet*/); 7122 } 7123 7124 7125 /** 7126 * Emits a jump to @a idxLabel on the condition that bit @a iBitNo _is_ _not_ 7127 * _set_ in @a iGprSrc. 7128 */ 7129 DECL_INLINE_THROW(uint32_t) 7130 iemNativeEmitTestBitInGprAndJmpToLabelIfNotSetEx(PIEMRECOMPILERSTATE pReNative, PIEMNATIVEINSTR pCodeBuf, uint32_t off, 7131 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel) 7132 { 7133 return iemNativeEmitTestBitInGprAndJmpToLabelIfCcEx(pReNative, pCodeBuf, off, iGprSrc, iBitNo, idxLabel, false /*fJmpIfSet*/); 7134 } 7135 7136 7137 /** 7138 * Internal helper, don't call directly. 7139 */ 7140 DECL_INLINE_THROW(uint32_t) 7141 iemNativeEmitTestBitInGprAndJmpToLabelIfCc(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, 7142 uint8_t iBitNo, uint32_t idxLabel, bool fJmpIfSet) 7143 { 7144 #ifdef RT_ARCH_AMD64 7145 off = iemNativeEmitTestBitInGprAndJmpToLabelIfCcEx(pReNative, iemNativeInstrBufEnsure(pReNative, off, 5+6), off, 7146 iGprSrc, iBitNo, idxLabel, fJmpIfSet); 7147 #elif defined(RT_ARCH_ARM64) 7148 off = iemNativeEmitTestBitInGprAndJmpToLabelIfCcEx(pReNative, iemNativeInstrBufEnsure(pReNative, off, 2), off, 7149 iGprSrc, iBitNo, idxLabel, fJmpIfSet); 7150 #else 7151 # error "Port me!" 7152 #endif 7153 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off); 7154 return off; 7155 } 7156 7157 7158 /** 7159 * Emits a jump to @a idxLabel on the condition that bit @a iBitNo _is_ _set_ in 7160 * @a iGprSrc. 
7026 7161 */ 7027 7162 DECL_INLINE_THROW(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, … … 7035 7170 * Emits a jump to @a idxLabel on the condition that bit @a iBitNo _is_ _not_ 7036 7171 * _set_ in @a iGprSrc. 7037 *7038 * @note On ARM64 the range is only +/-8191 instructions.7039 7172 */ 7040 7173 DECL_INLINE_THROW(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, -
trunk/src/VBox/VMM/include/IEMN8veRecompilerTlbLookup.h
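Editor's note on the hunk below: the TLB lookup emitter's plain fAlignMask parameter becomes fAlignMaskAndCtl, whose low 8 bits are the alignment mask (the doc comment gives 15 for a 128-bit aligned access) and whose upper bits carry alignment-control flags. The following standalone sketch shows how such a combined value can be composed and tested; the DEMO_ALIGN_* constants are made-up stand-ins, not the real IEM_MEMMAP_F_ALIGN_* values.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative control bits in the upper part of the combined value;
 * the actual IEM_MEMMAP_F_ALIGN_* constants are defined elsewhere. */
#define DEMO_ALIGN_SSE       UINT32_C(0x00000100)
#define DEMO_ALIGN_GP        UINT32_C(0x00000200)
#define DEMO_ALIGN_GP_OR_AC  UINT32_C(0x00000400)

/* For a naturally aligned power-of-two access the mask is cbMem - 1,
 * e.g. 15 for a 128-bit (16-byte) access as the doc comment notes. */
static uint32_t demoMakeAlignMaskAndCtl(uint8_t cbMem, uint32_t fCtl)
{
    return (uint32_t)(cbMem - 1) | fCtl;
}

/* Misalignment test over the low 8 bits only. */
static bool demoIsMisaligned(uint64_t GCPtrFlat, uint32_t fAlignMaskAndCtl)
{
    uint8_t const fAlignMask = (uint8_t)fAlignMaskAndCtl;
    return (GCPtrFlat & fAlignMask) != 0;
}

int main(void)
{
    uint32_t const fSse16 = demoMakeAlignMaskAndCtl(16, DEMO_ALIGN_SSE);
    printf("mask+ctl=%#x misaligned(0x1008)=%d misaligned(0x1010)=%d\n",
           fSse16, demoIsMisaligned(0x1008, fSse16), demoIsMisaligned(0x1010, fSse16));
    return 0;
}

Packing the control bits above the 8-bit mask keeps the emitter's signature unchanged in size while letting callers state both how strict the check must be and why.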
r104956 r104984 298 298 299 299 #ifdef IEMNATIVE_WITH_TLB_LOOKUP 300 /** 301 * 302 * @returns New @a off value. 303 * @param pReNative . 304 * @param off . 305 * @param pTlbState . 306 * @param iSegReg . 307 * @param cbMem . 308 * @param fAlignMaskAndCtl The low 8-bit is the alignment mask, ie. a 309 * 128-bit aligned access passes 15. This is only 310 * applied to ring-3 code, when dictated by the 311 * control bits and for atomic accesses. 312 * 313 * The other bits are used for alignment control: 314 * - IEM_MEMMAP_F_ALIGN_GP 315 * - IEM_MEMMAP_F_ALIGN_SSE 316 * - IEM_MEMMAP_F_ALIGN_GP_OR_AC 317 * Any non-zero upper bits means we will go to 318 * tlbmiss on anything out of alignment according 319 * to the mask in the low 8 bits. 320 * @param fAccess . 321 * @param idxLabelTlbLookup . 322 * @param idxLabelTlbMiss . 323 * @param idxRegMemResult . 324 * @param offDisp . 325 * @tparam a_fDataTlb . 326 * @tparam a_fNoReturn . 327 */ 300 328 template<bool const a_fDataTlb, bool const a_fNoReturn = false> 301 329 DECL_INLINE_THROW(uint32_t) 302 330 iemNativeEmitTlbLookup(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEEMITTLBSTATE const * const pTlbState, 303 uint8_t iSegReg, uint8_t cbMem, uint8_t fAlignMask , uint32_t fAccess,331 uint8_t iSegReg, uint8_t cbMem, uint8_t fAlignMaskAndCtl, uint32_t fAccess, 304 332 uint32_t idxLabelTlbLookup, uint32_t idxLabelTlbMiss, uint8_t idxRegMemResult, 305 333 uint8_t offDisp = 0) … … 535 563 536 564 /* 537 * 2. Check that the address doesn't cross a page boundrary and doesn't have alignment issues. 565 * 2. Check that the address doesn't cross a page boundrary and doesn't 566 * have alignment issues (not applicable to code). 538 567 * 539 * 2a. Alignment check using fAlignMask. 568 * For regular accesses (non-SSE/AVX & atomic stuff) we only need to 569 * check for #AC in ring-3 code. To simplify this, the need for AC 570 * checking is indicated by IEM_F_X86_AC in IEMCPU::fExec. 571 * 572 * The caller informs us about about SSE/AVX aligned accesses via the 573 * upper bits of fAlignMaskAndCtl and atomic accesses via fAccess. 540 574 */ 541 if (fAlignMask) 542 { 543 Assert(RT_IS_POWER_OF_TWO(fAlignMask + 1)); 544 Assert(fAlignMask < 128); 545 /* test regflat, fAlignMask */ 546 off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, idxRegFlatPtr, fAlignMask); 575 if (a_fDataTlb) 576 { 577 uint8_t const fAlignMask = (uint8_t)fAlignMaskAndCtl; 578 Assert(!(fAlignMaskAndCtl & ~(UINT32_C(0xff) | IEM_MEMMAP_F_ALIGN_SSE | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC))); 579 Assert(RT_IS_POWER_OF_TWO(fAlignMask + 1U)); 580 Assert(cbMem == fAlignMask + 1U || !(fAccess & IEM_ACCESS_ATOMIC)); 581 Assert(cbMem < 128); /* alignment test assumptions */ 582 583 /* 584 * 2a. Strict alignment check using fAlignMask for atomic, strictly 585 * aligned stuff (SSE & AVX) and AC=1 (ring-3). 
586 */ 587 bool const fStrictAlignmentCheck = fAlignMask 588 && ( (fAlignMaskAndCtl & ~UINT32_C(0xff)) 589 || (fAccess & IEM_ACCESS_ATOMIC) 590 || (pReNative->fExec & IEM_F_X86_AC) ); 591 if (fStrictAlignmentCheck) 592 { 593 /* test regflat, fAlignMask */ 594 off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, idxRegFlatPtr, fAlignMask); 595 547 596 #ifndef IEM_WITH_TLB_STATISTICS 548 /* jnz tlbmiss */549 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);597 /* jnz tlbmiss */ 598 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne); 550 599 #else 551 /* jz 1F; inc stat; jmp tlbmiss */ 552 uint32_t const offFixup1 = off; 553 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e); 554 off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2, 555 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissAlignment)); 556 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss); 557 iemNativeFixupFixedJump(pReNative, offFixup1, off); 558 #endif 559 } 560 561 /* 562 * 2b. Check that it's not crossing page a boundrary. This is implicit in 563 * the previous test if the alignment is same or larger than the type. 564 */ 565 if (cbMem > fAlignMask + 1) 566 { 567 /* reg1 = regflat & 0xfff */ 568 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,/*=*/ idxRegFlatPtr,/*&*/ GUEST_PAGE_OFFSET_MASK); 569 /* cmp reg1, GUEST_PAGE_SIZE - cbMem */ 570 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE); 600 /* jz 1F; inc stat; jmp tlbmiss */ 601 uint32_t const offFixup1 = off; 602 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e); 603 off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2, 604 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissAlignment)); 605 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss); 606 iemNativeFixupFixedJump(pReNative, offFixup1, off); 607 #endif 608 } 609 610 /* 611 * 2b. Check that it's not crossing page a boundrary if the access is 612 * larger than the aligment mask or if we didn't do the strict 613 * alignment check above. 
614 */ 615 if ( cbMem > 1 616 && ( !fStrictAlignmentCheck 617 || cbMem > fAlignMask + 1U)) 618 { 619 /* reg1 = regflat & 0xfff */ 620 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,/*=*/ idxRegFlatPtr,/*&*/ GUEST_PAGE_OFFSET_MASK); 621 /* cmp reg1, GUEST_PAGE_SIZE - cbMem */ 622 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE - cbMem); 571 623 #ifndef IEM_WITH_TLB_STATISTICS 572 /* ja tlbmiss */573 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);624 /* ja tlbmiss */ 625 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe); 574 626 #else 575 /* jbe 1F; inc stat; jmp tlbmiss */ 576 uint32_t const offFixup1 = off; 577 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_be); 578 off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2, 579 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissCrossPage)); 580 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss); 581 iemNativeFixupFixedJump(pReNative, offFixup1, off); 582 #endif 583 } 627 /* jbe 1F; inc stat; jmp tlbmiss */ 628 uint32_t const offFixup1 = off; 629 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_be); 630 off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2, 631 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissCrossPage)); 632 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss); 633 iemNativeFixupFixedJump(pReNative, offFixup1, off); 634 #endif 635 } 636 } 637 else 638 Assert(fAlignMaskAndCtl == 0); 584 639 585 640 /*