Changeset 100820 in vbox for trunk/src/VBox/VMM
- Timestamp: Aug 8, 2023 2:58:44 AM
- Location: trunk/src/VBox/VMM
- Files: 2 added, 4 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r100815 → r100820

-/**
- * Fetches a data byte.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   pu8Dst      Where to return the byte.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
-{
-    /* The lazy approach for now... */
-    uint8_t const *pu8Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
-    if (rc == VINF_SUCCESS)
-    {
-        *pu8Dst = *pu8Src;
-        rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
-        Log9(("IEM RD byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, *pu8Dst));
-    }
-    return rc;
-}
-
-
-#ifdef IEM_WITH_SETJMP
-/**
- * Fetches a data byte, longjmp on error.
- *
- * @returns The byte.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
-{
-    /* The lazy approach for now... */
-    uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
-    uint8_t const  bRet   = *pu8Src;
-    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
-    Log9(("IEM RD byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, bRet));
-    return bRet;
-}
-#endif /* IEM_WITH_SETJMP */
-
-
-/**
- * Fetches a data word.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   pu16Dst     Where to return the word.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
-{
-    /* The lazy approach for now... */
-    uint16_t const *pu16Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
-                                IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
-    if (rc == VINF_SUCCESS)
-    {
-        *pu16Dst = *pu16Src;
-        rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
-        Log9(("IEM RD word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, *pu16Dst));
-    }
-    return rc;
-}
-
-
-#ifdef IEM_WITH_SETJMP
-/**
- * Fetches a data word, longjmp on error.
- *
- * @returns The word
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
-{
-    /* The lazy approach for now... */
-    uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
-                                                             sizeof(*pu16Src) - 1);
-    uint16_t const  u16Ret  = *pu16Src;
-    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
-    Log9(("IEM RD word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Ret));
-    return u16Ret;
-}
-#endif
-
-
-/**
- * Fetches a data dword.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   pu32Dst     Where to return the dword.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
-{
-    /* The lazy approach for now... */
-    uint32_t const *pu32Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
-                                IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
-    if (rc == VINF_SUCCESS)
-    {
-        *pu32Dst = *pu32Src;
-        rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
-        Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, *pu32Dst));
-    }
-    return rc;
-}
+/*
+ * Instantiate R/W templates.
+ */
+#define TMPL_MEM_TYPE       uint8_t
+#define TMPL_MEM_FN_SUFF    U8
+#define TMPL_MEM_FMT_TYPE   "%#04x"
+#define TMPL_MEM_FMT_DESC   "byte"
+#include "IEMAllMemRWTmpl.cpp.h"
+
+#define TMPL_MEM_TYPE       uint16_t
+#define TMPL_MEM_FN_SUFF    U16
+#define TMPL_MEM_FMT_TYPE   "%#06x"
+#define TMPL_MEM_FMT_DESC   "word"
+#include "IEMAllMemRWTmpl.cpp.h"
+
+#define TMPL_MEM_TYPE       uint32_t
+#define TMPL_MEM_FN_SUFF    U32
+#define TMPL_MEM_FMT_TYPE   "%#010x"
+#define TMPL_MEM_FMT_DESC   "dword"
+#include "IEMAllMemRWTmpl.cpp.h"
+
+#define TMPL_MEM_TYPE       uint64_t
+#define TMPL_MEM_FN_SUFF    U64
+#define TMPL_MEM_FMT_TYPE   "%#018RX64"
+#define TMPL_MEM_FMT_DESC   "qword"
+#include "IEMAllMemRWTmpl.cpp.h"
+
+#define TMPL_MEM_TYPE       uint64_t
+#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
+#define TMPL_MEM_FN_SUFF    U64AlignedU128
+#define TMPL_MEM_FMT_TYPE   "%#018RX64"
+#define TMPL_MEM_FMT_DESC   "qword"
+#include "IEMAllMemRWTmpl.cpp.h"
…
-#ifdef IEM_WITH_SETJMP
-
-/**
- * Fetches a data dword, longjmp on error, fallback/safe version.
- *
- * @returns The dword
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
-{
-    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
-                                                             sizeof(*pu32Src) - 1);
-    uint32_t const  u32Ret  = *pu32Src;
-    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
-    Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
-    return u32Ret;
-}
-
-
-/**
- * Fetches a data dword, longjmp on error.
- *
- * @returns The dword
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
-{
-# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
-    /*
-     * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
-     */
-    RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
-    if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
-    {
-        /*
-         * TLB lookup.
-         */
-        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
-        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
-        if (pTlbe->uTag == uTag)
-        {
-            /*
-             * Check TLB page table level access flags.
-             */
-            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
-            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
-            if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
-                                               | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
-                == pVCpu->iem.s.DataTlb.uTlbPhysRev)
-            {
-                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
-
-                /*
-                 * Alignment check:
-                 */
-                /** @todo check priority \#AC vs \#PF */
-                if (   !(GCPtrEff & (sizeof(uint32_t) - 1))
-                    || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
-                    || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
-                    || IEM_GET_CPL(pVCpu) != 3)
-                {
-                    /*
-                     * Fetch and return the dword
-                     */
-                    Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
-                    Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
-                    uint32_t const u32Ret = *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
-                    Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
-                    return u32Ret;
-                }
-                Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
-                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
-            }
-        }
-    }
-
-    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
-       outdated page pointer, or other troubles.  */
-    Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
-    return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
-
-# else
-    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
-                                                             IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
-    uint32_t const  u32Ret  = *pu32Src;
-    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
-    Log9(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Ret));
-    return u32Ret;
-# endif
-}
-
-/**
- * Fetches a data dword from a FLAT address, longjmp on error.
- *
- * @returns The dword
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   GCPtrMem    The address of the guest memory.
- */
-uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
-{
-# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
-    /*
-     * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
-     */
-    RTGCPTR GCPtrEff = GCPtrMem;
-    if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
-    {
-        /*
-         * TLB lookup.
-         */
-        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
-        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
-        if (pTlbe->uTag == uTag)
-        {
-            /*
-             * Check TLB page table level access flags.
-             */
-            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
-            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
-            if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
-                                               | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
-                == pVCpu->iem.s.DataTlb.uTlbPhysRev)
-            {
-                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
-
-                /*
-                 * Alignment check:
-                 */
-                /** @todo check priority \#AC vs \#PF */
-                if (   !(GCPtrEff & (sizeof(uint32_t) - 1))
-                    || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
-                    || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
-                    || IEM_GET_CPL(pVCpu) != 3)
-                {
-                    /*
-                     * Fetch and return the dword
-                     */
-                    Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
-                    Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
-                    uint32_t const u32Ret = *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
-                    Log9(("IEM RD dword %RGv: %#010x\n", GCPtrMem, u32Ret));
-                    return u32Ret;
-                }
-                Log10(("iemMemFlatFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
-                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
-            }
-        }
-    }
-
-    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
-       outdated page pointer, or other troubles.  */
-    Log10(("iemMemFlatFetchDataU32Jmp: %RGv fallback\n", GCPtrMem));
-    return iemMemFetchDataU32SafeJmp(pVCpu, UINT8_MAX, GCPtrMem);
-
-# else
-    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), UINT8_MAX, GCPtrMem,
-                                                             IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
-    uint32_t const  u32Ret  = *pu32Src;
-    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
-    Log9(("IEM RD dword %RGv: %#010x\n", GCPtrMem, u32Ret));
-    return u32Ret;
-# endif
-}
-
-#endif /* IEM_WITH_SETJMP */
-
-
 #ifdef SOME_UNUSED_FUNCTION
 /**
…
 #endif
     return rc;
-}
-#endif
-
-
-/**
- * Fetches a data qword.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   pu64Dst     Where to return the qword.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
-{
-    /* The lazy approach for now... */
-    uint64_t const *pu64Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
-                                IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
-    if (rc == VINF_SUCCESS)
-    {
-        *pu64Dst = *pu64Src;
-        rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
-        Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, *pu64Dst));
-    }
-    return rc;
-}
-
-
-#ifdef IEM_WITH_SETJMP
-/**
- * Fetches a data qword, longjmp on error.
- *
- * @returns The qword.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
-{
-    /* The lazy approach for now... */
-    uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
-                                                             IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
-    uint64_t const  u64Ret  = *pu64Src;
-    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
-    Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Ret));
-    return u64Ret;
-}
-#endif
-
-
-/**
- * Fetches a data qword, aligned at a 16 byte boundrary (for SSE).
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   pu64Dst     Where to return the qword.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
-{
-    /* The lazy approach for now... */
-    uint64_t const *pu64Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
-                                IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
-    if (rc == VINF_SUCCESS)
-    {
-        *pu64Dst = *pu64Src;
-        rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
-        Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, *pu64Dst));
-    }
-    return rc;
-}
-
-
-#ifdef IEM_WITH_SETJMP
-/**
- * Fetches a data qword, longjmp on error.
- *
- * @returns The qword.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
-{
-    /* The lazy approach for now... */
-    uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
-                                                             15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
-    uint64_t const  u64Ret  = *pu64Src;
-    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
-    Log9(("IEM RD qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Ret));
-    return u64Ret;
 }
 #endif
…
     return rcStrict;
 }
-
-
-
-/**
- * Stores a data byte.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- * @param   u8Value     The value to store.
- */
-VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
-{
-    /* The lazy approach for now... */
-    uint8_t *pu8Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
-    if (rc == VINF_SUCCESS)
-    {
-        *pu8Dst = u8Value;
-        rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
-        Log8(("IEM WR byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, u8Value));
-    }
-    return rc;
-}
-
-
-#ifdef IEM_WITH_SETJMP
-/**
- * Stores a data byte, longjmp on error.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- * @param   u8Value     The value to store.
- */
-void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
-{
-    /* The lazy approach for now... */
-    Log8(("IEM WR byte %d|%RGv: %#04x\n", iSegReg, GCPtrMem, u8Value));
-    uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
-    *pu8Dst = u8Value;
-    iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
-}
-#endif
-
-
-/**
- * Stores a data word.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- * @param   u16Value    The value to store.
- */
-VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
-{
-    /* The lazy approach for now... */
-    uint16_t *pu16Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
-                                IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
-    if (rc == VINF_SUCCESS)
-    {
-        *pu16Dst = u16Value;
-        rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
-        Log8(("IEM WR word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Value));
-    }
-    return rc;
-}
-
-
-#ifdef IEM_WITH_SETJMP
-/**
- * Stores a data word, longjmp on error.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- * @param   u16Value    The value to store.
- */
-void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
-{
-    /* The lazy approach for now... */
-    uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
-                                                 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
-    *pu16Dst = u16Value;
-    iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
-    Log8(("IEM WR word %d|%RGv: %#06x\n", iSegReg, GCPtrMem, u16Value));
-}
-#endif
-
-
-/**
- * Stores a data dword.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- * @param   u32Value    The value to store.
- */
-VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
-{
-    /* The lazy approach for now... */
-    uint32_t *pu32Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
-                                IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
-    if (rc == VINF_SUCCESS)
-    {
-        *pu32Dst = u32Value;
-        rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
-        Log8(("IEM WR dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Value));
-    }
-    return rc;
-}
-
-
-#ifdef IEM_WITH_SETJMP
-/**
- * Stores a data dword.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- * @param   u32Value    The value to store.
- */
-void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
-{
-    /* The lazy approach for now... */
-    uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
-                                                 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
-    *pu32Dst = u32Value;
-    iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
-    Log8(("IEM WR dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, u32Value));
-}
-#endif
-
-
-/**
- * Stores a data qword.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- * @param   u64Value    The value to store.
- */
-VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
-{
-    /* The lazy approach for now... */
-    uint64_t *pu64Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
-                                IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
-    if (rc == VINF_SUCCESS)
-    {
-        *pu64Dst = u64Value;
-        rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
-        Log8(("IEM WR qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Value));
-    }
-    return rc;
-}
-
-
-#ifdef IEM_WITH_SETJMP
-/**
- * Stores a data qword, longjmp on error.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access.  The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- * @param   u64Value    The value to store.
- */
-void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
-{
-    /* The lazy approach for now... */
-    uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
-                                                 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
-    *pu64Dst = u64Value;
-    iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
-    Log8(("IEM WR qword %d|%RGv: %#018RX64\n", iSegReg, GCPtrMem, u64Value));
-}
-#endif
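The heart of the change: the hand-written per-width fetch and store routines above collapse into five instantiations of a shared template, compiled once per type. The two added files (IEMAllMemRWTmpl.cpp.h and IEMAllMemRWTmplInline.cpp.h) are not shown in this changeset view, so the single-file demo below only sketches the re-inclusion technique itself under stated assumptions: the TMPL_MEM_* parameter macros match the instantiation sites above, but every other name is invented and the real template bodies are considerably more involved.

/* tmpl_demo.c -- illustrative sketch of a re-includable code template.
 * Build with:  cc tmpl_demo.c -o tmpl_demo
 * (VirtualBox keeps the template in a separate .cpp.h; this demo folds
 * it into one self-including file for compactness.) */
#ifndef TMPL_MEM_TYPE
# include <stdint.h>
# include <stdio.h>
# include <string.h>

# define MY_CONCAT_(a,b) a##b
# define MY_CONCAT(a,b)  MY_CONCAT_(a,b)

static uint8_t g_abMem[64];   /* toy stand-in for guest memory */

/* Instantiate one fetch helper per width by re-including this file. */
# define TMPL_MEM_TYPE      uint16_t
# define TMPL_MEM_FN_SUFF   U16
# include "tmpl_demo.c"

# define TMPL_MEM_TYPE      uint32_t
# define TMPL_MEM_FN_SUFF   U32
# include "tmpl_demo.c"

int main(void)
{
    memcpy(g_abMem, "\x78\x56\x34\x12", 4);  /* little endian 0x12345678 */
    printf("%#06x %#010x\n", (unsigned)FetchU16(0), (unsigned)FetchU32(0));
    return 0;
}

#else  /* template body: expanded once per instantiation above */

static TMPL_MEM_TYPE MY_CONCAT(Fetch, TMPL_MEM_FN_SUFF)(size_t offMem)
{
    TMPL_MEM_TYPE uValue;
    memcpy(&uValue, &g_abMem[offMem], sizeof(uValue)); /* bounds/alignment checks omitted */
    return uValue;
}

# undef TMPL_MEM_TYPE
# undef TMPL_MEM_FN_SUFF

#endif

The design payoff is the usual one for this pattern: one body to fix and test, with the type, name suffix, and log-format strings supplied as macro parameters per instantiation, and optional knobs (here TMPL_MEM_TYPE_ALIGN and, in the inline header, TMPL_MEM_NO_STORE) toggling variants.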
trunk/src/VBox/VMM/include/IEMInline.h
r100810 → r100820

     else
     {
+        Assert(GCPtrMem <= UINT32_MAX);
         IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
         PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
         uint32_t const fRelevantAttrs = pSel->Attr.u & (  X86DESCATTR_P     | X86DESCATTR_UNUSABLE
                                                         | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
-        if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
+        if (   fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE) /* data, expand up */
+            /** @todo explore exactly how the CS stuff works in real mode. See also
+             *        http://www.rcollins.org/Productivity/DescriptorCache.html and
+             *        http://www.rcollins.org/ddj/Aug98/Aug98.html for some insight. */
+            || (iSegReg == X86_SREG_CS && IEM_IS_REAL_OR_V86_MODE(pVCpu)) ) /* Ignored for CS. */ /** @todo testcase! */
         {
             /* expand up */
-            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
-            if (RT_LIKELY(   GCPtrLast32 > pSel->u32Limit
-                          && GCPtrLast32 > (uint32_t)GCPtrMem))
+            uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
+            if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
+                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
                 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
+            iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
         }
         else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand up */
         {
-            /* expand down */
-            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
-            if (RT_LIKELY(   (uint32_t)GCPtrMem > pSel->u32Limit
-                          && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
-                          && GCPtrLast32 > (uint32_t)GCPtrMem))
+            /* expand down - the uppger boundary is defined by the B bit, not G. */
+            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
+            if (RT_LIKELY(   (uint32_t)GCPtrMem >= pSel->u32Limit
+                          && (pSel->Attr.n.u1DefBig || GCPtrLast32 <= UINT32_C(0xffff))
+                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
                 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
+            iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
         }
         else
             iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
-        iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
     }
     iemRaiseGeneralProtectionFault0Jmp(pVCpu);
…
 }
 
+/*
+ * Instantiate R/W inline templates.
+ */
+#define TMPL_MEM_TYPE       uint8_t
+#define TMPL_MEM_FN_SUFF    U8
+#define TMPL_MEM_FMT_TYPE   "%#04x"
+#define TMPL_MEM_FMT_DESC   "byte"
+#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
+
+#define TMPL_MEM_TYPE       uint16_t
+#define TMPL_MEM_FN_SUFF    U16
+#define TMPL_MEM_FMT_TYPE   "%#06x"
+#define TMPL_MEM_FMT_DESC   "word"
+#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
+
+#define TMPL_MEM_TYPE       uint32_t
+#define TMPL_MEM_FN_SUFF    U32
+#define TMPL_MEM_FMT_TYPE   "%#010x"
+#define TMPL_MEM_FMT_DESC   "dword"
+#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
+
+#define TMPL_MEM_TYPE       uint64_t
+#define TMPL_MEM_FN_SUFF    U64
+#define TMPL_MEM_FMT_TYPE   "%#018RX64"
+#define TMPL_MEM_FMT_DESC   "qword"
+#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
+
+#define TMPL_MEM_NO_STORE
+#define TMPL_MEM_TYPE       uint64_t
+#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
+#define TMPL_MEM_FN_SUFF    U64AlignedU128
+#define TMPL_MEM_FMT_TYPE   "%#018RX64"
+#define TMPL_MEM_FMT_DESC   "qword"
+#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
+#undef TMPL_MEM_NO_STORE
+
 /** @} */
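The IEMInline.h hunk is a behavioural fix as well as a refactor: as quoted above, the old expand-up test computed GCPtrMem + cbMem (one past the end) and accepted GCPtrLast32 > u32Limit, while the new code computes the offset of the last byte accessed and compares it inclusively, and the expand-down branch now ties the upper boundary to the B bit rather than the granularity bit. A small standalone sketch of the corrected checks follows; the helper names are invented for this example and mirror, not reproduce, the new logic.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Expand-up segment: for a cbMem-byte access at off, the last byte is
 * off + cbMem - 1; comparing that inclusively avoids the off-by-one at
 * the limit, and the offLast >= off test rejects 32-bit wrap-around. */
static bool IsExpandUpAccessOk(uint32_t off, uint32_t cbMem, uint32_t u32Limit)
{
    uint32_t const offLast = off + cbMem - 1;
    return offLast <= u32Limit   /* last byte inside the limit */
        && offLast >= off;       /* range does not wrap at 2^32 */
}

/* Expand-down segment: valid offsets lie above the limit; the upper
 * boundary is 0xffff unless the B (default-big) bit is set. */
static bool IsExpandDownAccessOk(uint32_t off, uint32_t cbMem, uint32_t u32Limit, bool fDefBig)
{
    uint32_t const offLast = off + cbMem - 1;
    return off >= u32Limit
        && (fDefBig || offLast <= UINT32_C(0xffff))
        && offLast >= off;
}

int main(void)
{
    /* A 4-byte access whose last byte sits exactly on the limit is legal. */
    assert( IsExpandUpAccessOk(0xFFC, 4, 0xFFF));
    assert(!IsExpandUpAccessOk(0xFFD, 4, 0xFFF));            /* one byte past */
    assert(!IsExpandUpAccessOk(0xFFFFFFFF, 4, 0xFFFFFFFF));  /* wraps */
    assert( IsExpandDownAccessOk(0x2000, 4, 0x1000, false));
    return 0;
}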
trunk/src/VBox/VMM/include/IEMInternal.h
r100811 → r100820

                                     RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT;
 #ifdef IEM_WITH_SETJMP
+uint8_t  iemMemFetchDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+uint16_t iemMemFetchDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+uint32_t iemMemFlatFetchDataU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+uint64_t iemMemFetchDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+uint64_t iemMemFetchDataU64AlignedU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void     iemMemFetchDataR80SafeJmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void     iemMemFetchDataD80SafeJmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void     iemMemFetchDataU128SafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void     iemMemFetchDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void     iemMemFetchDataU256SafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void     iemMemFetchDataU256AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+# if 0 /* these are inlined now */
 uint8_t  iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
…
 uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+# endif
 void     iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 void     iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
…
 VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
 #ifdef IEM_WITH_SETJMP
+void iemMemStoreDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU256SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
+#if 0
 void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
+#endif
 void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
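The header now declares out-of-line *SafeJmp workers alongside the Jmp fetchers that have moved into inline templates: the same hit/miss split that was visible in the removed iemMemFetchDataU32Jmp above, where a TLB fast path falls back to a careful slow path. A toy, self-contained rendering of that structure follows; the single-entry TLB and all names are invented for illustration, and the real code additionally checks physical revision and access flags and raises guest faults.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct TOYTLB
{
    uint64_t       uTag;    /* page number the cached mapping covers */
    const uint8_t *pbPage;  /* host pointer to that 4 KiB page, or NULL */
} TOYTLB;

static uint8_t g_abBacking[2 * 4096]; /* two toy guest pages */

/* Out-of-line "safe" worker: refills the TLB and retries.  (The real one
 * also handles MMIO, cross-page reads and exception raising.) */
static uint32_t ToyFetchU32Safe(TOYTLB *pTlb, uint64_t uAddr)
{
    pTlb->uTag   = uAddr >> 12;
    pTlb->pbPage = &g_abBacking[(size_t)(pTlb->uTag & 1) << 12];
    uint32_t u32;
    memcpy(&u32, &pTlb->pbPage[uAddr & 0xfff], sizeof(u32)); /* bounds handling omitted */
    return u32;
}

/* Always-inline front end: common cached case only. */
static inline uint32_t ToyFetchU32(TOYTLB *pTlb, uint64_t uAddr)
{
    uint64_t const uTag = uAddr >> 12;
    size_t const   off  = (size_t)(uAddr & 0xfff);
    if (   pTlb->uTag == uTag
        && pTlb->pbPage != NULL
        && off <= 0x1000 - sizeof(uint32_t))    /* access stays within the page */
    {
        uint32_t u32;
        memcpy(&u32, &pTlb->pbPage[off], sizeof(u32)); /* hot path */
        return u32;
    }
    return ToyFetchU32Safe(pTlb, uAddr);        /* miss/cross-page: slow path */
}

int main(void)
{
    TOYTLB Tlb = { 0, NULL };
    memcpy(&g_abBacking[16], "\x78\x56\x34\x12", 4);
    printf("%#010x (miss)\n", ToyFetchU32(&Tlb, 16)); /* takes the safe path */
    printf("%#010x (hit)\n",  ToyFetchU32(&Tlb, 16)); /* takes the fast path */
    return 0;
}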
trunk/src/VBox/VMM/include/IEMMc.h
r100811 → r100820

 # define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
-    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \
-    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem16)))
+    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem16)))
 # define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \
-    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem32)))
+    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem32)))
 #endif
…
 # define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
-    ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
-    ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem) + (a_offDisp)))
+    ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
 # define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \
-    ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
 #endif
…
 # define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
-    ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \
-    ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem) + (a_offDisp)))
+    ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
 # define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \
-    ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u64Dst) = iemMemFlatFetchDataU64AlignedU128Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \
-    ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_i64Dst) = (int64_t)iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
 #endif
…
     ((a_r32Dst).u = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
-    ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
     iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), UINT8_MAX, (a_GCPtrMem))
…
 # define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \
-    ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u16Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \
-    ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u32Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \
-    ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u64Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \
-    ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u32Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \
-    ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u64Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \
     ((a_u64Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
…
 # define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \
-    ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u16Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \
-    ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u32Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \
-    ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u64Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \
-    ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u32Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \
-    ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem)))
+    ((a_u64Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \
     ((a_u64Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
…
 # define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
-    iemMemStoreDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u8Value))
+    iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8Value))
 # define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
-    iemMemStoreDataU16Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u16Value))
+    iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16Value))
 # define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
-    iemMemStoreDataU32Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u32Value))
+    iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32Value))
 # define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
-    iemMemStoreDataU64Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u64Value))
+    iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64Value))
 #endif
…
 # define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \
-    iemMemStoreDataU8Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u8C))
+    iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8C))
 # define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \
-    iemMemStoreDataU16Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u16C))
+    iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16C))
 # define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \
-    iemMemStoreDataU32Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u32C))
+    iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32C))
 # define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \
-    iemMemStoreDataU64Jmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u64C))
+    iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64C))
 #endif
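Every IEM_MC_*_FLAT_* binding thus moves from a segmented helper called with the UINT8_MAX "no segment" sentinel to a dedicated iemMemFlat* variant, presumably generated by the inline templates above. A minimal sketch of the distinction that motivates this follows; the toy types and helpers are invented for illustration, not the real IEM API, and the segmented path omits the limit checking the real code performs.

#include <stdint.h>
#include <stdio.h>

typedef struct TOYCPU { uint64_t auSegBase[6]; } TOYCPU;

static uint32_t ReadU32(uint64_t uFlat) { (void)uFlat; return 0; /* stub */ }

/* Segmented helper: must test the sentinel and apply a base per access. */
static uint32_t FetchU32(TOYCPU *pCpu, uint8_t iSegReg, uint64_t off)
{
    uint64_t const uFlat = iSegReg == UINT8_MAX ? off : pCpu->auSegBase[iSegReg] + off;
    return ReadU32(uFlat);
}

/* Flat helper: 64-bit mode guarantees a flat address space, so skip it all. */
static uint32_t FlatFetchU32(TOYCPU *pCpu, uint64_t uFlat)
{
    (void)pCpu;
    return ReadU32(uFlat);
}

/* The macro layer then simply picks the right binding per decoder mode: */
#define MC_FETCH_MEM_U32(a_Dst, a_iSeg, a_Off)  ((a_Dst) = FetchU32(pVCpu, (a_iSeg), (a_Off)))
#define MC_FETCH_MEM_FLAT_U32(a_Dst, a_Off)     ((a_Dst) = FlatFetchU32(pVCpu, (a_Off)))

int main(void)
{
    TOYCPU   Cpu = {{0}};
    TOYCPU  *pVCpu = &Cpu;
    uint32_t u32;
    MC_FETCH_MEM_U32(u32, 3 /* toy DS index */, 0x1000);
    MC_FETCH_MEM_FLAT_U32(u32, 0x1000);
    printf("%#x\n", u32);
    return 0;
}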