Changeset 102424 in vbox for trunk/src/VBox/VMM/include
- Timestamp: Dec 1, 2023 10:43:39 PM (17 months ago)
- svn:sync-xref-src-repo-rev: 160567
- Location: trunk/src/VBox/VMM/include
- Files: 3 edited
trunk/src/VBox/VMM/include/IEMInline.h
Diff from r101387 to r102424:

```diff
 }

+DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT
+{
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
+    if (RT_LIKELY(bMapInfo == 0))
+        return;
+# endif
+    iemMemRollbackAndUnmapWoSafe(pVCpu, pvMem, bMapInfo);
+}
+
 #endif /* IEM_WITH_SETJMP */

…

 #include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"

+#undef TMPL_MEM_NO_STORE
+#undef TMPL_MEM_NO_MAPPING
+
+#define TMPL_MEM_TYPE       RTFLOAT80U
+#define TMPL_MEM_TYPE_ALIGN 7
+#define TMPL_MEM_TYPE_SIZE  10
+#define TMPL_MEM_FN_SUFF    R80
+#define TMPL_MEM_FMT_TYPE   "%.10Rhxs"
+#define TMPL_MEM_FMT_DESC   "tword"
+#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
+
 #undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
```
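IEMInline.h gains a write-only rollback-and-unmap fast path: when the ring-3 data TLB served the mapping directly, bMapInfo is zero and there is no bookkeeping to undo, so only bounce-buffered or cross-page mappings fall through to the out-of-line iemMemRollbackAndUnmapWoSafe worker. The second hunk instantiates the inline R80 (80-bit tword) fetch/store/map helpers from the shared template; TMPL_MEM_TYPE_SIZE 10 is the tword width and TMPL_MEM_TYPE_ALIGN 7 reads as an alignment mask (8-byte alignment), matching the sizeof(uint64_t) - 1 alignment argument the IEMMc.h macros below pass to iemMemMap. A minimal standalone sketch of the fast-path split; MYCPU and the my* names are hypothetical stand-ins, not VBox API:

```c
/* Hypothetical stand-ins for the VBox types; only the shape of the
 * fast-path/slow-path split is taken from the changeset. */
#include <stdint.h>
#include <stdio.h>

typedef struct MYCPU MYCPU;   /* opaque stand-in for VMCPUCC */

/* Out-of-line worker (stubbed here): in the real code this releases bounce
 * buffers, page locks, and other mapping bookkeeping. */
static void myMemRollbackAndUnmapWoSafe(MYCPU *pCpu, const void *pvMem, uint8_t bMapInfo)
{
    (void)pCpu; (void)pvMem;
    printf("slow path: bMapInfo=%#x\n", bMapInfo);
}

/* Inline front end: bMapInfo == 0 means the pointer came straight out of
 * the data TLB, so there is nothing to undo and we return immediately. */
static inline void myMemRollbackAndUnmapWo(MYCPU *pCpu, const void *pvMem, uint8_t bMapInfo)
{
    if (bMapInfo == 0)   /* the likely case in ring-3 with the data TLB */
        return;
    myMemRollbackAndUnmapWoSafe(pCpu, pvMem, bMapInfo);
}

int main(void)
{
    myMemRollbackAndUnmapWo(NULL, NULL, 0);      /* fast path: no-op */
    myMemRollbackAndUnmapWo(NULL, NULL, 0x21);   /* slow path: slot 1, write */
    return 0;
}
```

The point of the split is code size and branch cost: the inline front end is a single test and branch, while the rarely needed unmap logic stays out of line.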
trunk/src/VBox/VMM/include/IEMInternal.h
Diff from r102394 to r102424:

```diff
 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
 #endif
+void         iemMemRollbackAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
 void         iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
 VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
…
 uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void     iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PRTFLOAT80U pr80Dst) IEM_NOEXCEPT_MAY_LONGJMP;
 # endif
-void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
…
 void iemMemStoreDataU256SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemStoreDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataR80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTFLOAT80U pr80Value) IEM_NOEXCEPT_MAY_LONGJMP;
 #if 0
 void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
…
 uint64_t       *iemMemMapDataU64WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 uint64_t const *iemMemMapDataU64RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+PRTFLOAT80U     iemMemMapDataR80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+PRTFLOAT80U     iemMemMapDataR80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+PCRTFLOAT80U    iemMemMapDataR80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;

 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT;
 #endif
```
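The new Safe workers all traffic in a single bUnmapInfo/bMapInfo byte rather than the 32-bit fAccess word kept for the non-setjmp iemMemRollbackAndUnmap. Judging from the `a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4)` assignments in the IEMMc.h hunks below, the low nibble is a mapping-slot indicator (zero meaning a direct TLB mapping with nothing to release) and the high nibble carries the access-type bits. A hedged sketch of that packing; the my* helpers and the concrete MY_ACCESS_TYPE_WRITE value are assumptions for illustration, not the VBox API:

```c
#include <assert.h>
#include <stdint.h>

/* Arbitrary stand-in for IEM_ACCESS_TYPE_WRITE; the real constant's value
 * is not shown in this changeset. */
#define MY_ACCESS_TYPE_WRITE UINT8_C(0x02)

/* Pack: (slot index + 1) in bits 0-3, access-type bits in bits 4-7.
 * A zero byte therefore means "direct TLB mapping, nothing to unmap". */
static inline uint8_t myPackUnmapInfo(uint8_t iSlot, uint8_t fAccessType)
{
    return (uint8_t)((iSlot + 1) | (fAccessType << 4));
}

static inline int     myUnmapInfoSlot(uint8_t bMapInfo) { return (bMapInfo & 0xf) - 1; }
static inline uint8_t myUnmapInfoType(uint8_t bMapInfo) { return (uint8_t)(bMapInfo >> 4); }

int main(void)
{
    uint8_t const b = myPackUnmapInfo(0 /*slot*/, MY_ACCESS_TYPE_WRITE);
    assert(b == (1 | (MY_ACCESS_TYPE_WRITE << 4)));   /* same shape as the macros */
    assert(myUnmapInfoSlot(b) == 0);
    assert(myUnmapInfoType(b) == MY_ACCESS_TYPE_WRITE);
    return 0;
}
```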
trunk/src/VBox/VMM/include/IEMMc.h
Diff from r102349 to r102424:

```diff
     ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
 # define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
-    iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), UINT8_MAX, (a_GCPtrMem))
+    iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem))
 # define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \
     iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), UINT8_MAX, (a_GCPtrMem))
…
 #endif

+/** int32_t alias. */
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
+    IEM_MC_MEM_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
+#else
+# define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
+    (a_pi32Mem) = (int32_t *)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
+#endif
+
+/** Flat int32_t alias. */
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
+    IEM_MC_MEM_FLAT_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem)
+#else
+# define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
+    (a_pi32Mem) = (int32_t *)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
+#endif
+
+/** RTFLOAT32U alias. */
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
+    IEM_MC_MEM_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
+#else
+# define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
+    (a_pr32Mem) = (PRTFLOAT32U)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
+#endif
+
+/** Flat RTFLOAT32U alias. */
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
+    IEM_MC_MEM_FLAT_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem)
+#else
+# define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
+    (a_pr32Mem) = (PRTFLOAT32U)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
+#endif
+

 /* 64-bit */
…


+/* misc */
+
+/**
+ * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer acccess.
+ *
+ * @param[out] a_pr80Mem     Where to return the pointer to the mapping.
+ * @param[out] a_bUnmapInfo  Where to return umapping instructions. uint8_t.
+ * @param[in]  a_iSeg        The segment register to access via. No UINT8_MAX!
+ * @param[in]  a_GCPtrMem    The memory address.
+ * @remarks Will return/long jump on errors.
+ * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
+        IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), sizeof(RTFLOAT80U), (a_iSeg), \
+                                           (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \
+        a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
+    } while (0)
+#else
+# define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
+    (a_pr80Mem) = iemMemMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
+#endif
+
+/**
+ * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer acccess.
+ *
+ * @param[out] a_pr80Mem     Where to return the pointer to the mapping.
+ * @param[out] a_bUnmapInfo  Where to return umapping instructions. uint8_t.
+ * @param[in]  a_GCPtrMem    The memory address.
+ * @remarks Will return/long jump on errors.
+ * @see IEM_MC_MEM_COMMIT_AND_UNMAP_WO
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) do { \
+        IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), sizeof(RTFLOAT80U), UINT8_MAX, \
+                                           (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \
+        a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
+    } while (0)
+#else
+# define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
+    (a_pr80Mem) = iemMemFlatMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
+#endif
+
+
 /* commit + unmap */
…
  *
  * @remarks May in theory return - for now.
+ *
+ * @deprecated
  */
 #define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
     do { \
         if (   !(a_u16FSW & X86_FSW_ES) \
             || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
                  & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
             IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
     } while (0)
+
+
+/** Commits the memory and unmaps the guest memory unless the FPU status word
+ * indicates (@a a_u16FSW) and FPU control word indicates a pending exception
+ * that would cause FLD not to store.
+ *
+ * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
+ * store, while \#P will not.
+ *
+ * @remarks May in theory return - for now.
+ */
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \
+        RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \
+        if (   !(a_u16FSW & X86_FSW_ES) \
+            || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
+                 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
+            IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W)); \
+        else \
+            iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \
+    } while (0)
+#else
+# define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \
+        if (   !(a_u16FSW & X86_FSW_ES) \
+            || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
+                 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
+            iemMemCommitAndUnmapWoJmp(pVCpu, (a_pvMem), a_bMapInfo); \
+        else \
+            iemMemRollbackAndUnmapWo(pVCpu, (a_pvMem), a_bMapInfo); \
+    } while (0)
+#endif
+
+/** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory. */
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_pvMem, a_bMapInfo) do { \
+        RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \
+        iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \
+    } while (0)
+#else
+# define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_pvMem, a_bMapInfo) \
+    iemMemRollbackAndUnmapWo(pVCpu, (a_pvMem), a_bMapInfo)
+#endif
```
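The heart of the new IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO macro is its commit predicate: the store is committed unless the FSW exception-summary flag is set and at least one of #U/#O/#I is both pending and unmasked by the FCW; a pending #P alone never blocks the store, matching the doc comment above. A standalone restatement of just that predicate, using the architectural FSW/FCW bit values (the function name is made up for the sketch):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Architectural x87 status/control word bits (values per the Intel SDM). */
#define X86_FSW_IE        UINT16_C(0x0001)  /* #I  invalid operation */
#define X86_FSW_OE        UINT16_C(0x0008)  /* #O  overflow */
#define X86_FSW_UE        UINT16_C(0x0010)  /* #U  underflow */
#define X86_FSW_ES        UINT16_C(0x0080)  /* exception summary */
#define X86_FCW_MASK_ALL  UINT16_C(0x003f)  /* IM..PM exception mask bits */

/* Commit unless ES is set and one of #U/#O/#I is pending *and* unmasked;
 * the FCW mask bits line up positionally with the FSW flag bits, so the
 * AND-NOT isolates unmasked pending exceptions.  #P is deliberately
 * absent from the tested set. */
static bool myFpuStoreShouldCommit(uint16_t fsw, uint16_t fcw)
{
    return !(fsw & X86_FSW_ES)
        || !(  (fsw & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE))
             & ~(fcw & X86_FCW_MASK_ALL));
}

int main(void)
{
    /* Unmasked #O pending (OM clear, OE+ES set): roll the store back. */
    assert(!myFpuStoreShouldCommit(X86_FSW_ES | X86_FSW_OE, 0));
    /* Same #O but masked by OM (bit 3): the store goes through. */
    assert(myFpuStoreShouldCommit(X86_FSW_ES | X86_FSW_OE, UINT16_C(0x0008)));
    return 0;
}
```

In the real macros the FCW comes from pVCpu->cpum.GstCtx.XState.x87.FCW and both branches additionally unmap the buffer (commit or rollback); the sketch isolates only the decision.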