- Timestamp:
- Jan 10, 2024 12:30:48 AM (13 months ago)
- File:
-
- 1 file edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp
r101378 r102802 2132 2132 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16, (uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags)) 2133 2133 { 2134 iemAImpl_cmpxchg_u16_locked(pu16Dst, puAx, uSrcReg, pEFlags); 2134 # if 0 2135 /* If correctly aligned, used the locked variation. */ 2136 if (!((uintptr_t)pu16Dst & 1)) 2137 iemAImpl_cmpxchg_u16_locked(pu16Dst, puAx, uSrcReg, pEFlags); 2138 else 2139 # endif 2140 { 2141 /* Otherwise emulate it as best as we can. */ 2142 uint16_t const uOld = *puAx; 2143 uint16_t const uDst = *pu16Dst; 2144 if (uOld == uDst) 2145 { 2146 *pu16Dst = uSrcReg; 2147 iemAImpl_cmp_u16(&uOld, uOld, pEFlags); 2148 } 2149 else 2150 { 2151 *puAx = uDst; 2152 iemAImpl_cmp_u16(&uOld, uDst, pEFlags); 2153 } 2154 } 2135 2155 } 2136 2156 … … 2138 2158 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32, (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags)) 2139 2159 { 2140 iemAImpl_cmpxchg_u32_locked(pu32Dst, puEax, uSrcReg, pEFlags); 2160 # if 0 2161 /* If correctly aligned, used the locked variation. */ 2162 if (!((uintptr_t)pu32Dst & 3)) 2163 iemAImpl_cmpxchg_u32_locked(pu32Dst, puEax, uSrcReg, pEFlags); 2164 else 2165 # endif 2166 { 2167 /* Otherwise emulate it as best as we can. */ 2168 uint32_t const uOld = *puEax; 2169 uint32_t const uDst = *pu32Dst; 2170 if (uOld == uDst) 2171 { 2172 *pu32Dst = uSrcReg; 2173 iemAImpl_cmp_u32(&uOld, uOld, pEFlags); 2174 } 2175 else 2176 { 2177 *puEax = uDst; 2178 iemAImpl_cmp_u32(&uOld, uDst, pEFlags); 2179 } 2180 } 2141 2181 } 2142 2182 … … 2145 2185 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags)) 2146 2186 { 2147 iemAImpl_cmpxchg_u64_locked(pu64Dst, puRax, puSrcReg, pEFlags); 2148 } 2149 # else 2187 # if 0 2188 /* If correctly aligned, used the locked variation. 
*/ 2189 if (!((uintptr_t)pu32Dst & 7)) 2190 iemAImpl_cmpxchg_u64_locked(pu64Dst, puRax, puSrcReg, pEFlags); 2191 else 2192 # endif 2193 { 2194 /* Otherwise emulate it as best as we can. */ 2195 uint64_t const uOld = *puRax; 2196 uint64_t const uSrc = *puSrcReg; 2197 uint64_t const uDst = *pu64Dst; 2198 if (uOld == uDst) 2199 { 2200 *pu64Dst = uSrc; 2201 iemAImpl_cmp_u64(&uOld, uOld, pEFlags); 2202 } 2203 else 2204 { 2205 *puRax = uDst; 2206 iemAImpl_cmp_u64(&uOld, uDst, pEFlags); 2207 } 2208 } 2209 } 2210 # else /* ARCH_BITS != 32 */ 2150 2211 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags)) 2151 2212 { 2152 iemAImpl_cmpxchg_u64_locked(pu64Dst, puRax, uSrcReg, pEFlags); 2153 } 2213 # if 0 2214 /* If correctly aligned, used the locked variation. */ 2215 if (!((uintptr_t)pu64Dst & 7)) 2216 iemAImpl_cmpxchg_u64_locked(pu64Dst, puRax, uSrcReg, pEFlags); 2217 else 2218 # endif 2219 { 2220 /* Otherwise emulate it as best as we can. */ 2221 uint64_t const uOld = *puRax; 2222 uint64_t const uDst = *pu64Dst; 2223 if (uOld == uDst) 2224 { 2225 *pu64Dst = uSrcReg; 2226 iemAImpl_cmp_u64(&uOld, uOld, pEFlags); 2227 } 2228 else 2229 { 2230 *puRax = uDst; 2231 iemAImpl_cmp_u64(&uOld, uDst, pEFlags); 2232 } 2233 } 2234 } 2235 # endif /* ARCH_BITS != 32 */ 2236 2237 2238 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx, uint32_t *pEFlags)) 2239 { 2240 # if 0 2241 /* If correctly aligned, used the locked variation. */ 2242 if (!((uintptr_t)pu64Dst & 7)) 2243 iemAImpl_cmpxchg8b_locked(pu64Dst, pu64EaxEdx, pu64EbxEcx, pEFlags); 2244 else 2154 2245 # endif 2155 2156 2157 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx, uint32_t *pEFlags)) 2158 { 2159 iemAImpl_cmpxchg8b_locked(pu64Dst, pu64EaxEdx, pu64EbxEcx, pEFlags); 2246 { 2247 /* Otherwise emulate it as best as we can. 
*/ 2248 uint64_t const uNew = pu64EbxEcx->u; 2249 uint64_t const uOld = pu64EaxEdx->u; 2250 uint64_t const uDst = *pu64Dst; 2251 if (uDst == uOld) 2252 { 2253 *pu64Dst = uNew; 2254 *pEFlags |= X86_EFL_ZF; 2255 } 2256 else 2257 { 2258 pu64EaxEdx->u = uDst; 2259 *pEFlags &= ~X86_EFL_ZF; 2260 } 2261 } 2160 2262 } 2161 2263 … … 2164 2266 uint32_t *pEFlags)) 2165 2267 { 2166 iemAImpl_cmpxchg16b_locked(pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags); 2268 # if 0 2269 /* If correctly aligned, used the locked variation. */ 2270 if (!((uintptr_t)pu64Dst & 15)) 2271 iemAImpl_cmpxchg16b_locked(pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags); 2272 else 2273 # endif 2274 { 2275 /* Otherwise emulate it as best as we can. */ 2276 # ifdef RT_COMPILER_WITH_128BIT_INT_TYPES 2277 uint128_t const uNew = pu128RbxRcx->u; 2278 uint128_t const uOld = pu128RaxRdx->u; 2279 uint128_t const uDst = pu128Dst->u; 2280 if (uDst == uOld) 2281 { 2282 pu128Dst->u = uNew; 2283 *pEFlags |= X86_EFL_ZF; 2284 } 2285 else 2286 { 2287 pu128RaxRdx->u = uDst; 2288 *pEFlags &= ~X86_EFL_ZF; 2289 } 2290 # else 2291 RTUINT128U const uNew = *pu128RbxRcx; 2292 RTUINT128U const uOld = *pu128RaxRdx; 2293 RTUINT128U const uDst = *pu128Dst; 2294 if ( uDst.s.Lo == uOld.s.Lo 2295 && uDst.s.Hi == uOld.s.Hi) 2296 { 2297 *pu128Dst = uNew; 2298 *pEFlags |= X86_EFL_ZF; 2299 } 2300 else 2301 { 2302 *pu128RaxRdx = uDst; 2303 *pEFlags &= ~X86_EFL_ZF; 2304 } 2305 # endif 2306 } 2167 2307 } 2168 2308
Note:
See TracChangeset
for help on using the changeset viewer.