Changeset 104403 in vbox for trunk/src/VBox/VMM/include
Timestamp: Apr 23, 2024 9:51:08 AM
File: 1 edited
Legend: unmodified lines are shown with a leading space, removed lines with "-", added lines with "+".
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h
--- r104402
+++ r104403

@@ -983 +983 @@
     if (offVCpu < (unsigned)_4K)
     {
-        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu);
+        uint32_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+        pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu);
     }
     else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)_4K)
     {
-        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, IEMNATIVE_REG_FIXED_PCPUMCTX,
-                                                         offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx));
+        uint32_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+        pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, IEMNATIVE_REG_FIXED_PCPUMCTX,
+                                                   offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx));
+    }
+    else if (offVCpu <= 0xffffffU)
+    {
+        uint32_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
+        pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu >> 12,
+                                                   true /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
+        if (offVCpu & 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, offVCpu & 0xfff);
     }
     else

@@ -996 +1004 @@
         Assert(iGprDst != IEMNATIVE_REG_FIXED_PVMCPU);
         off = iemNativeEmitLoadGprImm64(pReNative, off, iGprDst, offVCpu);
-        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, IEMNATIVE_REG_FIXED_PCPUMCTX, iGprDst);
+        uint32_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+        pCodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, IEMNATIVE_REG_FIXED_PCPUMCTX, iGprDst);
     }
 

@@ -1809 +1817 @@
 /**
  * Emits a gprdst = gprsrc + addend load.
- * @note The added is 32-bit for AMD64 and 64-bit for ARM64.
+ * @note The addend is 32-bit for AMD64 and 64-bit for ARM64.
  */
 #ifdef RT_ARCH_AMD64

@@ -2209 +2217 @@
 
 #elif defined(RT_ARCH_ARM64)
-    if ((uint32_t)offDisp < (unsigned)_4K)
-    {
-        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, ARMV8_A64_REG_BP, (uint32_t)offDisp);
-    }
-    else if ((uint32_t)-offDisp < (unsigned)_4K)
-    {
-        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, ARMV8_A64_REG_BP, (uint32_t)-offDisp);
+    bool     const fSub       = offDisp < 0;
+    uint32_t const offAbsDisp = (uint32_t)RT_ABS(offDisp);
+    if (offAbsDisp <= 0xffffffU)
+    {
+        uint32_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
+        if (offAbsDisp <= 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, ARMV8_A64_REG_BP, offAbsDisp);
+        else
+        {
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, ARMV8_A64_REG_BP, offAbsDisp >> 12,
+                                                          true /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
+            if (offAbsDisp & 0xfffU)
+                pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, offAbsDisp & 0xfff);
+        }
     }
     else
     {
         Assert(iGprDst != IEMNATIVE_REG_FIXED_PVMCPU);
-        off = iemNativeEmitLoadGprImm64(pReNative, off, iGprDst, offDisp >= 0 ? (uint32_t)offDisp : (uint32_t)-offDisp);
-        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        if (offDisp >= 0)
-            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, ARMV8_A64_REG_BP, iGprDst);
-        else
-            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, iGprDst, ARMV8_A64_REG_BP, iGprDst);
+        off = iemNativeEmitLoadGprImm64(pReNative, off, iGprDst, offAbsDisp);
+        uint32_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+        pCodeBuf[off++] = Armv8A64MkInstrAddSubReg(fSub, iGprDst, ARMV8_A64_REG_BP, iGprDst);
     }
 

@@ -2338 +2348 @@
 }
 
-
 #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
+
 /**
  * Emits a 128-bit vector register store with an BP relative destination address.

@@ -2421 +2431 @@
 #endif
 }
-#endif
-
+
+#endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */
 #if defined(RT_ARCH_ARM64)
 

@@ -4127 +4137 @@
 #elif defined(RT_ARCH_ARM64)
     uint64_t const uAbsAddend = (uint64_t)RT_ABS(iAddend);
-    if (uAbsAddend < 4096)
-    {
-        if (iAddend >= 0)
-            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, (uint32_t)uAbsAddend);
-        else
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, (uint32_t)uAbsAddend);
-    }
-    else if (uAbsAddend <= 0xfff000 && !(uAbsAddend & 0xfff))
-    {
-        if (iAddend >= 0)
-            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, (uint32_t)uAbsAddend >> 12,
-                                                       true /*f64Bit*/, true /*fShift12*/);
-        else
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, (uint32_t)uAbsAddend >> 12,
-                                                       true /*f64Bit*/, true /*fShift12*/);
+    if (uAbsAddend <= 0xffffffU)
+    {
+        bool const fSub = iAddend < 0;
+        if (uAbsAddend > 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend >> 12, true /*f64Bit*/,
+                                                          false /*fSetFlags*/, true /*fShift12*/);
+        if (uAbsAddend & 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend & UINT32_C(0xfff));
     }
     else if (iGprTmp != UINT8_MAX)

@@ -4200 +4203 @@
 
 
 #elif defined(RT_ARCH_ARM64)
-    if ((uint64_t)RT_ABS(iAddend) < RT_BIT_32(12))
-    {
-        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        if (iAddend >= 0)
-            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint32_t)iAddend);
-        else
-            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint32_t)-iAddend);
+    bool     const fSub       = iAddend < 0;
+    uint64_t const uAbsAddend = (uint64_t)RT_ABS(iAddend);
+    if (uAbsAddend <= 0xffffffU)
+    {
+        uint32_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
+        if (uAbsAddend > 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend >> 12, true /*f64Bit*/,
+                                                          false /*fSetFlags*/, true /*fShift12*/);
+        if (uAbsAddend & 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend & UINT32_C(0xfff));
     }
     else

@@ -4215 +4221 @@
         /* add gprdst, gprdst, tmpreg */
         uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iTmpReg);
+        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(fSub, iGprDst, iGprDst, iTmpReg);
 
         iemNativeRegFreeTmpImm(pReNative, iTmpReg);

@@ -4231 +4237 @@
  * Emits a 32-bit GPR additions with a 32-bit signed immediate.
  * @note Bits 32 thru 63 in the GPR will be zero after the operation.
- * @note For ARM64 the iAddend value must be in the range 0x000..0xfff,
- *       or that range shifted 12 bits to the left (e.g. 0x1000..0xfff000 with
- *       the lower 12 bits always zero).  The negative ranges are also allowed,
- *       making it behave like a subtraction.  If the constant does not conform,
- *       bad stuff will happen.
+ * @note For ARM64 the iAddend value must be in the range 0x000000..0xffffff.
+ *       The negative ranges are also allowed, making it behave like a
+ *       subtraction.  If the constant does not conform, bad stuff will happen.
  */
 DECL_FORCE_INLINE_THROW(uint32_t)

@@ -4256 +4260 @@
 #elif defined(RT_ARCH_ARM64)
     uint32_t const uAbsAddend = (uint32_t)RT_ABS(iAddend);
-    if (uAbsAddend <= 0xfff)
-    {
-        if (iAddend >= 0)
-            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, uAbsAddend, false /*f64Bit*/);
-        else
-            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, uAbsAddend, false /*f64Bit*/);
-    }
-    else if (uAbsAddend <= 0xfff000 && !(uAbsAddend & 0xfff))
-    {
-        if (iAddend >= 0)
-            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, uAbsAddend >> 12,
-                                                          false /*f64Bit*/, true /*fShift12*/);
-        else
-            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, uAbsAddend >> 12,
-                                                          false /*f64Bit*/, true /*fShift12*/);
+    if (uAbsAddend <= 0xffffffU)
+    {
+        bool const fSub = iAddend < 0;
+        if (uAbsAddend > 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend >> 12, false /*f64Bit*/,
+                                                          false /*fSetFlags*/, true /*fShift12*/);
+        if (uAbsAddend & 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend & 0xfff, false /*f64Bit*/);
     }
     else

@@ -4297 +4294 @@
 
 
 #elif defined(RT_ARCH_ARM64)
-    if ((uint64_t)RT_ABS(iAddend) < RT_BIT_32(12))
-    {
-        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        if (iAddend >= 0)
-            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint32_t)iAddend, false /*f64Bit*/);
-        else
-            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint32_t)-iAddend, false /*f64Bit*/);
+    bool     const fSub       = iAddend < 0;
+    uint32_t const uAbsAddend = (uint32_t)RT_ABS(iAddend);
+    if (uAbsAddend <= 0xffffffU)
+    {
+        uint32_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
+        if (uAbsAddend > 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend >> 12, false /*f64Bit*/,
+                                                          false /*fSetFlags*/, true /*fShift12*/);
+        if (uAbsAddend & 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend & 0xfff, false /*f64Bit*/);
     }
     else

@@ -4312 +4312 @@
         /* add gprdst, gprdst, tmpreg */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iTmpReg, false /*f64Bit*/);
+        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(fSub, iGprDst, iGprDst, iTmpReg, false /*f64Bit*/);
 
         iemNativeRegFreeTmpImm(pReNative, iTmpReg);

@@ -4333 +4333 @@
  * @note AMD64: Will only update the lower 16 bits of the register.
  * @note ARM64: Will update the entire register.
- * @note ARM64: Larger constants will require a temporary register.  Failing to
- *       specify one when needed will trigger fatal assertion / throw.
  * @sa   iemNativeEmitSubGpr16ImmEx
  */
-DECL_FORCE_INLINE_THROW(uint32_t)
-iemNativeEmitAddGpr16ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int16_t iAddend,
-                           uint8_t iGprTmp = UINT8_MAX)
+DECL_FORCE_INLINE(uint32_t)
+iemNativeEmitAddGpr16ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int16_t iAddend)
 {
 #ifdef RT_ARCH_AMD64

@@ -4372 +4369 @@
         pCodeBuf[off++] = RT_BYTE2((uint16_t)iAddend);
     }
-    RT_NOREF(iGprTmp);
-
-#elif defined(RT_ARCH_ARM64)
-    uint32_t uAbsAddend = RT_ABS(iAddend);
-    if (uAbsAddend < 4096)
-    {
-        if (iAddend >= 0)
-            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsAddend, false /*f64Bit*/);
-        else
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsAddend, false /*f64Bit*/);
-    }
-    else if (uAbsAddend <= 0xfff000 && !(uAbsAddend & 0xfff))
-    {
-        if (iAddend >= 0)
-            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsAddend >> 12,
-                                                       false /*f64Bit*/, false /*fSetFlags*/, true /*fShift*/);
-        else
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsAddend >> 12,
-                                                       false /*f64Bit*/, false /*fSetFlags*/, true /*fShift*/);
-    }
-    else if (iGprTmp != UINT8_MAX)
-    {
-        off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, iGprTmp, (uint32_t)iAddend);
-        pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprDst, iGprTmp, false /*f64Bit*/);
+
+#elif defined(RT_ARCH_ARM64)
+    bool     const fSub       = iAddend < 0;
+    uint32_t const uAbsAddend = (uint32_t)RT_ABS(iAddend);
+    if (uAbsAddend > 0xfffU)
+        pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend >> 12, false /*f64Bit*/,
+                                                      false /*fSetFlags*/, true /*fShift12*/);
+    if (uAbsAddend & 0xfffU)
+        pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend & 0xfff, false /*f64Bit*/);
+    pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, 15, 0, false /*f64Bit*/);
+
+#else
+# error "Port me"
+#endif
+    return off;
+}
+
+
+
+/**
+ * Adds two 64-bit GPRs together, storing the result in a third register.
+ */
+DECL_FORCE_INLINE(uint32_t)
+iemNativeEmitGprEqGprPlusGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend1, uint8_t iGprAddend2)
+{
+#ifdef RT_ARCH_AMD64
+    if (iGprDst != iGprAddend1 && iGprDst != iGprAddend2)
+    {
+        /** @todo consider LEA */
+        off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, iGprDst, iGprAddend1);
+        off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprAddend2);
+    }
+    else
+        off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprDst != iGprAddend1 ? iGprAddend1 : iGprAddend2);
+
+#elif defined(RT_ARCH_ARM64)
+    pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprAddend1, iGprAddend2);
+
+#else
+# error "Port me!"
+#endif
+    return off;
+}
+
+
+
+/**
+ * Adds two 32-bit GPRs together, storing the result in a third register.
+ * @note Bits 32 thru 63 in @a iGprDst will be zero after the operation.
+ */
+DECL_FORCE_INLINE(uint32_t)
+iemNativeEmitGpr32EqGprPlusGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend1, uint8_t iGprAddend2)
+{
+#ifdef RT_ARCH_AMD64
+    if (iGprDst != iGprAddend1 && iGprDst != iGprAddend2)
+    {
+        /** @todo consider LEA */
+        off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, iGprDst, iGprAddend1);
+        off = iemNativeEmitAddTwoGprs32Ex(pCodeBuf, off, iGprDst, iGprAddend2);
+    }
+    else
+        off = iemNativeEmitAddTwoGprs32Ex(pCodeBuf, off, iGprDst, iGprDst != iGprAddend1 ? iGprAddend1 : iGprAddend2);
+
+#elif defined(RT_ARCH_ARM64)
+    pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprAddend1, iGprAddend2, false /*f64Bit*/);
+
+#else
+# error "Port me!"
+#endif
+    return off;
+}
+
+
+/**
+ * Adds a 64-bit GPR and a 64-bit unsigned constant, storing the result in a
+ * third register.
+ *
+ * @note The ARM64 version does not work for non-trivial constants if the
+ *       two registers are the same.  Will assert / throw exception.
+ */
+DECL_FORCE_INLINE_THROW(uint32_t)
+iemNativeEmitGprEqGprPlusImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend, int64_t iImmAddend)
+{
+#ifdef RT_ARCH_AMD64
+    /** @todo consider LEA */
+    if ((int8_t)iImmAddend == iImmAddend)
+    {
+        off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, iGprDst, iGprAddend);
+        off = iemNativeEmitAddGprImm8Ex(pCodeBuf, off, iGprDst, (int8_t)iImmAddend);
+    }
+    else
+    {
+        off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprDst, iImmAddend);
+        off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprAddend);
+    }
+
+#elif defined(RT_ARCH_ARM64)
+    bool     const fSub          = iImmAddend < 0;
+    uint64_t const uAbsImmAddend = RT_ABS(iImmAddend);
+    if (uAbsImmAddend <= 0xfffU)
+        pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprAddend, uAbsImmAddend);
+    else if (uAbsImmAddend <= 0xffffffU)
+    {
+        pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprAddend, uAbsImmAddend >> 12,
+                                                      true /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
+        if (uAbsImmAddend & 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsImmAddend & UINT32_C(0xfff));
+    }
+    else if (iGprDst != iGprAddend)
+    {
+        off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprDst, (uint64_t)iImmAddend);
+        off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprAddend);
     }
     else

@@ -4403 +4488 @@
         AssertReleaseFailedStmt(off = UINT32_MAX);
 # endif
-    pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, 15, 0, false /*f64Bit*/);
-
-#else
-# error "Port me"
-#endif
-    return off;
-}
-
-
-
-/**
- * Adds two 64-bit GPRs together, storing the result in a third register.
- */
-DECL_FORCE_INLINE(uint32_t)
-iemNativeEmitGprEqGprPlusGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend1, uint8_t iGprAddend2)
-{
-#ifdef RT_ARCH_AMD64
-    if (iGprDst != iGprAddend1 && iGprDst != iGprAddend2)
-    {
-        /** @todo consider LEA */
-        off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, iGprDst, iGprAddend1);
-        off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprAddend2);
-    }
-    else
-        off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprDst != iGprAddend1 ? iGprAddend1 : iGprAddend2);
-
-#elif defined(RT_ARCH_ARM64)
-    pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprAddend1, iGprAddend2);
 
 #else

@@ -4439 +4496 @@
 
 
-
-/**
- * Adds two 32-bit GPRs together, storing the result in a third register.
+/**
+ * Adds a 32-bit GPR and a 32-bit unsigned constant, storing the result in a
+ * third register.
+ *
  * @note Bits 32 thru 63 in @a iGprDst will be zero after the operation.
- */
-DECL_FORCE_INLINE(uint32_t)
-iemNativeEmitGpr32EqGprPlusGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend1, uint8_t iGprAddend2)
-{
-#ifdef RT_ARCH_AMD64
-    if (iGprDst != iGprAddend1 && iGprDst != iGprAddend2)
-    {
-        /** @todo consider LEA */
-        off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, iGprDst, iGprAddend1);
-        off = iemNativeEmitAddTwoGprs32Ex(pCodeBuf, off, iGprDst, iGprAddend2);
-    }
-    else
-        off = iemNativeEmitAddTwoGprs32Ex(pCodeBuf, off, iGprDst, iGprDst != iGprAddend1 ? iGprAddend1 : iGprAddend2);
-
-#elif defined(RT_ARCH_ARM64)
-    pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprAddend1, iGprAddend2, false /*f64Bit*/);
-
-#else
-# error "Port me!"
-#endif
-    return off;
-}
-
-
-/**
- * Adds a 64-bit GPR and a 64-bit unsigned constant, storing the result in a
- * third register.
  *
  * @note The ARM64 version does not work for non-trivial constants if the

@@ -4475 +4506 @@
  */
 DECL_FORCE_INLINE_THROW(uint32_t)
-iemNativeEmitGprEqGprPlusImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend, int64_t iImmAddend)
+iemNativeEmitGpr32EqGprPlusImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend, int32_t iImmAddend)
 {
 #ifdef RT_ARCH_AMD64

@@ -4481 +4512 @@
     if ((int8_t)iImmAddend == iImmAddend)
     {
-        off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, iGprDst, iGprAddend);
-        off = iemNativeEmitAddGprImm8Ex(pCodeBuf, off, iGprDst, (int8_t)iImmAddend);
-    }
-    else
-    {
-        off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprDst, iImmAddend);
-        off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprAddend);
-    }
-
-#elif defined(RT_ARCH_ARM64)
-    uint64_t const uAbsImmAddend = RT_ABS(iImmAddend);
-    if (uAbsImmAddend < 4096)
-    {
-        if (iImmAddend >= 0)
-            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprAddend, uAbsImmAddend);
-        else
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprAddend, uAbsImmAddend);
-    }
-    else if (uAbsImmAddend <= 0xfff000 && !(uAbsImmAddend & 0xfff))
-    {
-        if (iImmAddend >= 0)
-            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsImmAddend >> 12, true /*f64Bit*/, true /*fShift12*/);
-        else
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsImmAddend >> 12, true /*f64Bit*/, true /*fShift12*/);
-    }
-    else if (iGprDst != iGprAddend)
-    {
-        off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprDst, (uint64_t)iImmAddend);
-        off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprAddend);
-    }
-    else
-# ifdef IEM_WITH_THROW_CATCH
-        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
-# else
-        AssertReleaseFailedStmt(off = UINT32_MAX);
-# endif
-
-#else
-# error "Port me!"
-#endif
-    return off;
-}
-
-
-/**
- * Adds a 32-bit GPR and a 32-bit unsigned constant, storing the result in a
- * third register.
- *
- * @note Bits 32 thru 63 in @a iGprDst will be zero after the operation.
- *
- * @note The ARM64 version does not work for non-trivial constants if the
- *       two registers are the same.  Will assert / throw exception.
- */
-DECL_FORCE_INLINE_THROW(uint32_t)
-iemNativeEmitGpr32EqGprPlusImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend, int32_t iImmAddend)
-{
-#ifdef RT_ARCH_AMD64
-    /** @todo consider LEA */
-    if ((int8_t)iImmAddend == iImmAddend)
-    {
         off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, iGprDst, iGprAddend);
         off = iemNativeEmitAddGpr32Imm8Ex(pCodeBuf, off, iGprDst, (int8_t)iImmAddend);

@@ -4551 +4522 @@
 
 #elif defined(RT_ARCH_ARM64)
+    bool     const fSub          = iImmAddend < 0;
     uint32_t const uAbsImmAddend = RT_ABS(iImmAddend);
-    if (uAbsImmAddend < 4096)
-    {
-        if (iImmAddend >= 0)
-            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprAddend, uAbsImmAddend, false /*f64Bit*/);
-        else
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprAddend, uAbsImmAddend, false /*f64Bit*/);
-    }
-    else if (uAbsImmAddend <= 0xfff000 && !(uAbsImmAddend & 0xfff))
-    {
-        if (iImmAddend >= 0)
-            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsImmAddend >> 12, false /*f64Bit*/, true /*fShift12*/);
-        else
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsImmAddend >> 12, false /*f64Bit*/, true /*fShift12*/);
+    if (uAbsImmAddend <= 0xfffU)
+        pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprAddend, uAbsImmAddend, false /*f64Bit*/);
+    else if (uAbsImmAddend <= 0xffffffU)
+    {
+        pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprAddend, uAbsImmAddend >> 12,
+                                                      false /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
+        if (uAbsImmAddend & 0xfffU)
+            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsImmAddend & 0xfff, false /*f64Bit*/);
     }
     else if (iGprDst != iGprAddend)

@@ -6131 +6098 @@
                                                       true /*64Bit*/, true /*fSetFlags*/);
     }
-    else if ( uImm < RT_BIT_32(12+12) && (uImm & (_4K - 1)) == 0)
+    else if ((uImm & ~(uint64_t)0xfff000) == 0)
     {
         uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);

@@ -6192 +6159 @@
         pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
                                                       false /*64Bit*/, true /*fSetFlags*/);
-    else if ( uImm < RT_BIT_32(12+12) && (uImm & (_4K - 1)) == 0)
+    else if ((uImm & ~(uint32_t)0xfff000) == 0)
         pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
                                                       false /*64Bit*/, true /*fSetFlags*/, true /*fShift12*/);

@@ -6227 +6194 @@
                                                       false /*64Bit*/, true /*fSetFlags*/);
     }
-    else if ( uImm < RT_BIT_32(12+12) && (uImm & (_4K - 1)) == 0)
+    else if ((uImm & ~(uint32_t)0xfff000) == 0)
     {
         uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
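The pattern repeated throughout this changeset is that the ARM64 add/sub emitters now handle absolute addends of up to 24 bits inline: the high twelve bits go into an ADD/SUB immediate using the instruction's LSL #12 form (the fShift12 parameter), and any non-zero low twelve bits are folded into a second plain ADD/SUB immediate, so a temporary register is only needed for larger constants. The sketch below is not part of the changeset; it just illustrates that split in isolation, and emit_add_sub_imm() is a hypothetical stand-in for appending one instruction word to the recompiler's code buffer.

/*
 * Illustrative sketch only: split a signed addend of up to 24 bits into at
 * most two ARM64 ADD/SUB immediate instructions, mirroring the new emitter
 * paths.  emit_add_sub_imm() is a hypothetical helper that merely prints the
 * instruction it would emit.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void emit_add_sub_imm(const char *pszReg, uint32_t uImm12, int fShift12, int fSub)
{
    printf("%s %s, %s, #%" PRIu32 "%s\n",
           fSub ? "sub" : "add", pszReg, pszReg, uImm12, fShift12 ? ", lsl #12" : "");
}

static int emit_add_reg_imm24(const char *pszReg, int64_t iAddend)
{
    uint64_t const uAbsAddend = (uint64_t)(iAddend < 0 ? -iAddend : iAddend);
    int      const fSub       = iAddend < 0;
    if (uAbsAddend > 0xffffffU)
        return -1;  /* too large: load into a temporary register and use ADD/SUB (register) instead */
    if (uAbsAddend > 0xfffU)        /* bits 12..23: ADD/SUB #imm12, LSL #12 */
        emit_add_sub_imm(pszReg, (uint32_t)(uAbsAddend >> 12), 1 /*fShift12*/, fSub);
    if (uAbsAddend & 0xfffU)        /* bits 0..11: plain ADD/SUB #imm12 */
        emit_add_sub_imm(pszReg, (uint32_t)(uAbsAddend & 0xfffU), 0 /*fShift12*/, fSub);
    return 0;
}

int main(void)
{
    emit_add_reg_imm24("x8", 0x123456);  /* add x8, x8, #291, lsl #12 ; add x8, x8, #1110 */
    emit_add_reg_imm24("x8", -0x2000);   /* sub x8, x8, #2, lsl #12 */
    return 0;
}

The last three hunks apply the same boundaries to the flags-setting compare paths: the old condition uImm < RT_BIT_32(12+12) && (uImm & (_4K - 1)) == 0 and the new (uImm & ~0xfff000) == 0 both accept exactly the immediates whose set bits all lie in bits 12..23, i.e. the values encodable with the shifted 12-bit form alone.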