Changeset 102424 in vbox for trunk/src/VBox/VMM/include/IEMMc.h
- Timestamp:
- Dec 1, 2023 10:43:39 PM (14 months ago)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/include/IEMMc.h
r102349 r102424 981 981 ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem))) 982 982 # define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \ 983 iemMemF etchDataR80Jmp(pVCpu, &(a_r80Dst), UINT8_MAX, (a_GCPtrMem))983 iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem)) 984 984 # define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \ 985 985 iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), UINT8_MAX, (a_GCPtrMem)) … … 1932 1932 #endif 1933 1933 1934 /** int32_t alias. */ 1935 #ifndef IEM_WITH_SETJMP 1936 # define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1937 IEM_MC_MEM_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) 1938 #else 1939 # define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1940 (a_pi32Mem) = (int32_t *)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1941 #endif 1942 1943 /** Flat int32_t alias. */ 1944 #ifndef IEM_WITH_SETJMP 1945 # define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1946 IEM_MC_MEM_FLAT_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) 1947 #else 1948 # define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1949 (a_pi32Mem) = (int32_t *)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1950 #endif 1951 1952 /** RTFLOAT32U alias. */ 1953 #ifndef IEM_WITH_SETJMP 1954 # define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1955 IEM_MC_MEM_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) 1956 #else 1957 # define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1958 (a_pr32Mem) = (PRTFLOAT32U)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 1959 #endif 1960 1961 /** Flat RTFLOAT32U alias. 
*/ 1962 #ifndef IEM_WITH_SETJMP 1963 # define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1964 IEM_MC_MEM_FLAT_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) 1965 #else 1966 # define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1967 (a_pr32Mem) = (PRTFLOAT32U)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 1968 #endif 1969 1934 1970 1935 1971 /* 64-bit */ … … 2062 2098 2063 2099 2100 /* misc */ 2101 2102 /** 2103 * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer acccess. 2104 * 2105 * @param[out] a_pr80Mem Where to return the pointer to the mapping. 2106 * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t. 2107 * @param[in] a_iSeg The segment register to access via. No UINT8_MAX! 2108 * @param[in] a_GCPtrMem The memory address. 2109 * @remarks Will return/long jump on errors. 2110 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2111 */ 2112 #ifndef IEM_WITH_SETJMP 2113 # define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 2114 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), sizeof(RTFLOAT80U), (a_iSeg), \ 2115 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \ 2116 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 2117 } while (0) 2118 #else 2119 # define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2120 (a_pr80Mem) = iemMemMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem)) 2121 #endif 2122 2123 /** 2124 * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer acccess. 2125 * 2126 * @param[out] a_pr80Mem Where to return the pointer to the mapping. 2127 * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t. 2128 * @param[in] a_GCPtrMem The memory address. 2129 * @remarks Will return/long jump on errors. 
2130 * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO 2131 */ 2132 #ifndef IEM_WITH_SETJMP 2133 # define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 2134 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), sizeof(RTFLOAT80U), UINT8_MAX, \ 2135 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \ 2136 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 2137 } while (0) 2138 #else 2139 # define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \ 2140 (a_pr80Mem) = iemMemFlatMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem)) 2141 #endif 2142 2143 2064 2144 /* commit + unmap */ 2065 2145 … … 2112 2192 * 2113 2193 * @remarks May in theory return - for now. 2194 * 2195 * @deprecated 2114 2196 */ 2115 2197 #define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \ … … 2120 2202 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \ 2121 2203 } while (0) 2204 2205 2206 /** Commits the memory and unmaps the guest memory unless the FPU status word 2207 * indicates (@a a_u16FSW) and FPU control word indicates a pending exception 2208 * that would cause FLD not to store. 2209 * 2210 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a 2211 * store, while \#P will not. 2212 * 2213 * @remarks May in theory return - for now. 
2214 */ 2215 #ifndef IEM_WITH_SETJMP 2216 # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \ 2217 RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \ 2218 if ( !(a_u16FSW & X86_FSW_ES) \ 2219 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \ 2220 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \ 2221 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W)); \ 2222 else \ 2223 iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \ 2224 } while (0) 2225 #else 2226 # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \ 2227 if ( !(a_u16FSW & X86_FSW_ES) \ 2228 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \ 2229 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \ 2230 iemMemCommitAndUnmapWoJmp(pVCpu, (a_pvMem), a_bMapInfo); \ 2231 else \ 2232 iemMemRollbackAndUnmapWo(pVCpu, (a_pvMem), a_bMapInfo); \ 2233 } while (0) 2234 #endif 2235 2236 /** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory. */ 2237 #ifndef IEM_WITH_SETJMP 2238 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_pvMem, a_bMapInfo) do { \ 2239 RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \ 2240 iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \ 2241 } while (0) 2242 #else 2243 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_pvMem, a_bMapInfo) \ 2244 iemMemRollbackAndUnmapWo(pVCpu, (a_pvMem), a_bMapInfo) 2245 #endif 2122 2246 2123 2247
Note:
See TracChangeset for help on using the changeset viewer.