- Timestamp: Oct 1, 2024, 9:05:44 AM
- svn:sync-xref-src-repo-rev: 164971
- Location: trunk
- Files: 7 edited
trunk/include/iprt/x86.h
- r106061 → r106187: in the EFLAGS bit definitions, adds #define X86_EFL_1_BIT 1 immediately after #define X86_EFL_1 RT_BIT_32(1) (bit 1, reserved, reads as 1) and before the X86_EFL_PF parity-flag definition, so the reserved bit can be referenced by bit index as well as by mask (see the sketch below).
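A minimal stand-alone sketch (not VirtualBox code) of why having both the mask and the bit number is handy. RT_BIT_32 is redefined locally so the snippet compiles on its own; in IPRT it comes from iprt/cdefs.h.

    #include <assert.h>
    #include <stdint.h>

    /* Local stand-ins for the iprt/x86.h definitions touched by the diff. */
    #define RT_BIT_32(a_iBit)  ((uint32_t)1 << (a_iBit))
    #define X86_EFL_1          RT_BIT_32(1)
    #define X86_EFL_1_BIT      1

    int main(void)
    {
        uint32_t fEfl = 0x202;                           /* typical EFLAGS: IF + reserved bit 1 */
        assert(X86_EFL_1 == RT_BIT_32(X86_EFL_1_BIT));   /* mask and bit number stay in sync */
        assert(fEfl & X86_EFL_1);                        /* mask form: test via AND */
        assert((fEfl >> X86_EFL_1_BIT) & 1);             /* bit-number form: what bit-test emitters want */
        return 0;
    }

Bit-test emitters such as the new strict check in IEMAllN8veEmit-x86.h take a bit index rather than a mask, which is what the new define provides.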
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp
- r106180 → r106187:
- The AMD64 "jz nothing_pending" emit now sizes the provisional jump target according to whether postponed EFLAGS calculations are pending: iemNativeEmitJccToFixedEx() is passed IEMNATIVE_HAS_POSTPONED_EFLAGS_CALCS(pReNative) ? off + 512 : off + 64 instead of the fixed off + 64, presumably so the chosen encoding has enough displacement range for the extra code emitted before the target (see the fixup sketch below).
- The ARM64 counterpart, iemNativeEmitTestIfGprIsZeroOrNotZeroAndJmpToFixedEx(), now passes off rather than off + 16 as the provisional target; the branch is patched by iemNativeFixupFixedJump() afterwards either way.
- IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off) is asserted after that fixup.
- Both code-TLB-miss paths (the iemNativeHlpMemCodeNewPageTlbMissWithOff and iemNativeHlpMemCodeNewPageTlbMiss calls) gain an IEMNATIVE_WITH_EFLAGS_POSTPONING block that performs the delayed EFLAGS calculations via iemNativeDoPostponedEFlagsAtTlbMiss<0>(pReNative, off, &TlbState, fHstRegsNotToSave) before the helper call (no restrictions on volatile registers there), and the helper call itself switches to the template form iemNativeEmitCallImm<true /*a_fSkipEflChecks*/>().
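The offFixup1 / iemNativeFixupFixedJump() pattern in this hunk is: emit a branch with a placeholder target, remember its offset, emit the code in between, then patch the displacement. A stand-alone sketch of that pattern using a made-up byte-buffer emitter (the names and rel8 encoding here are illustrative, not the VirtualBox emitter):

    #include <assert.h>
    #include <stdint.h>

    static uint8_t g_abCode[64];

    /* Emit a 'jz rel8' with a placeholder displacement. */
    static uint32_t emit_jz_placeholder(uint32_t off)
    {
        g_abCode[off++] = 0x74;   /* jz rel8 */
        g_abCode[off++] = 0x00;   /* displacement, filled in later */
        return off;
    }

    /* Patch the displacement once the real target is known. */
    static void fixup_jz(uint32_t offFixup, uint32_t offTarget)
    {
        int32_t cbDisp = (int32_t)offTarget - (int32_t)(offFixup + 2); /* rel8 is relative to the next instruction */
        assert(cbDisp >= -128 && cbDisp <= 127);
        g_abCode[offFixup + 1] = (uint8_t)cbDisp;
    }

    int main(void)
    {
        uint32_t off = 0;
        uint32_t const offFixup1 = off;      /* like offFixup1 in the diff */
        off = emit_jz_placeholder(off);
        g_abCode[off++] = 0x90;              /* code the jump skips (nops here) */
        g_abCode[off++] = 0x90;
        fixup_jz(offFixup1, off);            /* like iemNativeFixupFixedJump() */
        assert(g_abCode[offFixup1 + 1] == 2);
        return 0;
    }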
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h
- r106180 → r106187:
- The TLB-miss code paths gain IEMNATIVE_WITH_EFLAGS_POSTPONING blocks that run the delayed EFLAGS calculations via iemNativeDoPostponedEFlagsAtTlbMiss<>() right before the helper call, with the template bitmask naming the call-argument registers already loaded with parameters: ARG1|ARG2 in the first path (~line 1757), ARG1 alone in the effective-stack-pointer paths (~lines 2461 and 8890), and ARG1|ARG2 again in the stack store path (~line 8541).
- In iemNativeEmitMemFetchStoreDataCommon()'s TLB-miss path (~line 7537) the excluded mask depends on the operation: for stores and 128/256-bit accesses it is ARG1|ARG2 (flat, iSegReg == UINT8_MAX) or ARG1|ARG2|ARG3 (segmented); otherwise ARG1 (flat) or ARG1|ARG2 (segmented). The memory-mapping path (~line 9518) uses ARG2 (flat) or ARG2|ARG3 (segmented). A small illustration of the bitmask-as-template-parameter pattern follows below.
- All of the affected helper calls switch from iemNativeEmitCallImm() to the template form iemNativeEmitCallImm<true /*a_fSkipEflChecks*/>(). The commit/unmap path (~line 9713) gains a comment explaining why that is safe: it is only reached after a TLB miss, where the postponed EFLAGS work has already been done.
- RT_CONSTEXPR is replaced by RT_CONSTEXPR_IF in the compile-time checks of a_fEflOutput that update iem.s.fSkippingEFlags (~line 6276).
- A new @todo on iemNativeEmitMemFetchStoreDataCommon() suggests passing enmOp, cbMem, fAlignMaskAndCtl and an iSegReg == UINT8_MAX indicator as template parameters.
- The doxygen @note tags in the comment about SIMD variable references (the register assigned for the TlbLookup case must not be freed; loads need no stack sync) become plain "Note!" lines.
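The template argument to iemNativeDoPostponedEFlagsAtTlbMiss<> is simply a compile-time bitmask of host registers the worker must leave alone. A stand-alone illustration of building and consuming such a mask as a non-type template parameter; the register numbers and helper names are made up, only the pattern matches the diff:

    #include <cassert>
    #include <cstdint>

    #define MY_BIT_32(a_iBit)  (UINT32_C(1) << (a_iBit))

    /* Pretend host register numbers for two call-argument GPRs. */
    constexpr unsigned kCallArg1Greg = 6;
    constexpr unsigned kCallArg2Greg = 2;

    /* The "don't touch these" set is baked into the instantiation, so it
       costs nothing at runtime and can be checked at compile time. */
    template<uint32_t const a_bmRegsInUse>
    unsigned pickScratchReg(uint32_t bmAllAllocatable)
    {
        static_assert((a_bmRegsInUse >> 16) == 0, "only 16 GPRs modelled here");
        uint32_t const bmFree = bmAllAllocatable & ~a_bmRegsInUse;
        assert(bmFree != 0);
        unsigned idxReg = 0;                      /* lowest free register */
        while (!(bmFree & MY_BIT_32(idxReg)))
            idxReg++;
        return idxReg;
    }

    int main()
    {
        unsigned idxReg = pickScratchReg<MY_BIT_32(kCallArg1Greg) | MY_BIT_32(kCallArg2Greg)>(UINT32_C(0xffff));
        assert(idxReg != kCallArg1Greg && idxReg != kCallArg2Greg);
        return 0;
    }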
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
- r106180 → r106187:
- New iemNativeEmitLoadGprWithGstShadowRegEx(): a code-buffer ("Ex") variant that loads guest register enmGstReg into host register idxHstReg, zero-extending to 64 bits. It switches on g_aGstShadowInfo[enmGstReg].cb and forwards to iemNativeEmitLoadGprFromVCpuU64Ex/U32Ex/U16Ex (the 8-bit case is not present in the table), asserting/longjmping on any other size. It does not mark idxHstReg as shadowing enmGstReg; that is the caller's job if applicable.
- iemNativeEmitTop32BitsClearCheck() is split: the emission moves into a DECL_FORCE_INLINE worker, iemNativeEmitTop32BitsClearCheckEx(), which writes into a caller-supplied code buffer (rol reg64,32; test reg32,0ffffffffh; je +1; int3; rol reg64,32 on AMD64 - lsr/cbz/brk #0x1100 on ARM64), while the original function becomes a thin wrapper that calls iemNativeInstrBufEnsure() for 20 bytes (AMD64) or 3 instructions (ARM64) and then the worker. A plain-C statement of what the generated check verifies follows below.
- iemNativeEmitGuestRegValueCheck() likewise gains a buffer-taking iemNativeEmitGuestRegValueCheckEx(). The AMD64 sequence is unchanged apart from writing through the supplied buffer, and for 32-bit guest registers it now falls through to iemNativeEmitTop32BitsClearCheckEx(). The ARM64 path loads the shadow value with the new iemNativeEmitLoadGprWithGstShadowRegEx(), emits the compare-and-break with Armv8A64MkInstrCbz() and corrects the comment to "cbz tmp0, +2". The original function is now a wrapper that ensures 32 bytes (AMD64) or 5 instructions (ARM64) and calls the Ex variant.
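What the emitted rol/test/je/int3 sequence (lsr/cbz/brk on ARM64) verifies is simply that a 64-bit host register carrying a 32-bit guest value has its upper half clear. Expressed as plain C rather than generated machine code (the abort stands in for the int3 / brk the emitter plants):

    #include <stdint.h>
    #include <stdlib.h>

    /* C-level equivalent of the code iemNativeEmitTop32BitsClearCheck() generates:
       trap if bits 32..63 of the register value are not all zero. */
    static void checkTop32BitsClear(uint64_t uRegValue)
    {
        if ((uRegValue >> 32) != 0)
            abort();    /* stands in for int3 / brk #0x1100 */
    }

    int main(void)
    {
        checkTop32BitsClear(UINT64_C(0x00000000ffffffff));   /* fine */
        /* checkTop32BitsClear(UINT64_C(0x100000000));          would trap */
        return 0;
    }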
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h
- r106180 → r106187:
- RT_CONSTEXPR becomes RT_CONSTEXPR_IF in the EFLAGS-clobbering check (~line 232).
- Fixes an opcode mix-up in the flag-collection emitters: "AH = EFLAGS" is lahf (0x9f), not sahf (0x9e); both emit sites are corrected, and the "we could alternatively use SAHF" comments now say LAHF (a short note on the lahf byte layout follows below).
- iemNativeDoPostponedEFlagsAtTbExitInternal() is renamed iemNativeDoPostponedEFlagsInternal() and gains an a_fTlbMiss template parameter plus a bmExtraTlbMissRegs argument so the same worker also serves the TLB-miss code path. Register selection is reworked and documented: on a TB exit any host GPR outside a_bmInputRegs, the fixed registers and the postponed-EFLAGS registers may be used; in the TLB-miss path only volatile registers plus the temporaries recorded in the TLB state are eligible (a_bmInputRegs takes precedence over bmExtraTlbMissRegs).
- Reusing an existing EFLAGS shadow register now additionally requires, in the TLB-miss case, that the register is not volatile (volatile registers are no longer valid there), and under VBOX_STRICT the shadow is verified with iemNativeEmitGuestRegValueCheckEx() instead of being left as a /** @todo */.
- Under VBOX_STRICT, before the EFLAGS value is stored back to cpum.GstCtx.eflags, the code now asserts that X86_EFL_1 is set (brk 0x3330 otherwise) and that the X86_EFL_RAZ_LO_MASK bits are clear (brk 0x3331 otherwise).
- New iemNativeDoPostponedEFlagsAtTlbMiss<a_bmInputRegs>() wrapper: if postponed EFLAGS work is pending it ensures the instruction buffer and invokes the internal worker with a_fTlbMiss = true, passing pTlbState->getRegsNotToSave() | bmTmpRegs as extra usable registers.
- iemNativeEmitEFlagsForLogical(): the skipping branch now returns early; the postponing branch is reformulated (plain if instead of else if, and it additionally requires idxRegResult != UINT8_MAX) and no longer clears pReNative->fSkippingEFlags itself; instead fSkippingEFlags is now set to 0 (rather than masked with ~X86_EFL_STATUS_BITS) unconditionally at the end of the function, together with the IEMNATIVE_STRICT_EFLAGS_SKIPPING store of 0 to iem.s.fSkippingEFlags.
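The opcode fix matters because lahf (0x9F) is the instruction that copies the low EFLAGS byte into AH, while sahf (0x9E) goes the other way. A stand-alone sketch of the byte layout lahf produces, which is why loading AH is enough to collect the SF/ZF/AF/PF/CF status bits (OF lives in bit 11 and is not captured, hence the "OF is cleared" remarks in the diff):

    #include <assert.h>
    #include <stdint.h>

    /* AH after LAHF mirrors EFLAGS bits 7..0: SF ZF 0 AF 0 PF 1 CF. */
    static uint8_t lahfImageOfEflags(uint32_t fEfl)
    {
        return (uint8_t)(fEfl & 0xff);   /* lahf simply copies the low EFLAGS byte */
    }

    int main(void)
    {
        uint32_t const fEfl = UINT32_C(0x0000C7);   /* SF, ZF, reserved bit 1, PF, CF set */
        uint8_t const  bAh  = lahfImageOfEflags(fEfl);
        assert(bAh & (1 << 0));   /* CF */
        assert(bAh & (1 << 2));   /* PF */
        assert(bAh & (1 << 6));   /* ZF */
        assert(bAh & (1 << 7));   /* SF */
        assert(bAh & (1 << 1));   /* reserved bit 1, always reads as 1 (X86_EFL_1) */
        return 0;
    }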
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
- r106180 → r106187:
- IEMNATIVE_MAX_POSTPONED_EFLAGS_INSTRUCTIONS is raised for strict builds to make room for the new VBOX_STRICT assertions: 64 on AMD64 and 48 on ARM64 when VBOX_STRICT is defined, 32 otherwise (unchanged).
- New IEMNATIVE_HAS_POSTPONED_EFLAGS_CALCS(a_pReNative) macro for testing whether any postponed EFLAGS calculations are pending without #ifdef-ing the check: it evaluates (a_pReNative)->PostponedEfl.fEFlags != 0 when IEMNATIVE_WITH_EFLAGS_POSTPONING is defined and expands to false otherwise (a sketch of the pattern follows below).
- Prototypes are added for the new iemNativeEmitLoadGprWithGstShadowRegEx() and, under VBOX_STRICT, iemNativeEmitGuestRegValueCheckEx() emitters.
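The point of IEMNATIVE_HAS_POSTPONED_EFLAGS_CALCS() is that call sites can test for pending postponed work without wrapping every use in #ifdef. A minimal stand-alone sketch of that pattern with hypothetical names (same shape as the diff, not the VirtualBox types):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toggle the feature at compile time, e.g. with -DMY_WITH_POSTPONING. */
    #ifdef MY_WITH_POSTPONING
    typedef struct MYSTATE { unsigned fPostponedEfl; } MYSTATE;
    # define MY_HAS_POSTPONED_CALCS(a_pState)  ((a_pState)->fPostponedEfl != 0)
    #else
    typedef struct MYSTATE { int iUnused; } MYSTATE;
    # define MY_HAS_POSTPONED_CALCS(a_pState)  false   /* folds away when the feature is compiled out */
    #endif

    int main(void)
    {
        MYSTATE State = { 0 };
        /* No #ifdef needed at the call site, mirroring how the diff uses
           IEMNATIVE_HAS_POSTPONED_EFLAGS_CALCS() when sizing the fixed jump. */
        unsigned cbReserve = MY_HAS_POSTPONED_CALCS(&State) ? 512 : 64;
        printf("reserving %u instruction bytes\n", cbReserve);
        return 0;
    }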
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h
- r106180 → r106187:
- RT_CONSTEXPR becomes RT_CONSTEXPR_IF in the compile-time branches throughout: the 32-bit immediate load (iemNativeEmitLoadGpr32ImmEx), iemNativeEmitCallImm's a_fSkipEflChecks check, and the a_fActuallyExitingTb / a_fPostponedEfl conditionals in the TB-exit emitters (a sketch of the underlying if-constexpr pattern follows below).
- iemNativeEmitLoadGprFromVCpuU32Ex() is promoted from DECL_INLINE_THROW to DECL_FORCE_INLINE_THROW.
- New iemNativeEmitLoadGprFromVCpuU16Ex(): a code-buffer variant of the 16-bit VCpu field load (movzx reg32, mem16 on AMD64; a halfword load via iemNativeEmitGprByVCpuLdStEx() on ARM64), zero-extending bits 16 thru 63; iemNativeEmitLoadGprFromVCpuU16() becomes a wrapper around it.
- iemNativeEmitTestAnyBitsInGpr32Ex() gains an optional iTmpReg parameter (default UINT8_MAX): on ARM64, a mask that cannot be encoded as a logical immediate is now loaded into the temporary register with iemNativeEmitLoadGpr32ImmEx() and tested with ANDS against XZR instead of failing; on AMD64 the parameter is RT_NOREF'd since the immediate forms always suffice.
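The RT_CONSTEXPR → RT_CONSTEXPR_IF switch presumably moves the condition inside the macro so it can wrap C++17 "if constexpr" when available and degrade to a plain runtime if otherwise; the exact IPRT expansion is not shown in this changeset, so the macro below is a hypothetical stand-in illustrating only the usage pattern:

    #include <cstdint>
    #include <cstdio>

    /* Hypothetical stand-in for RT_CONSTEXPR_IF: with C++17 the untaken branch
       is discarded at compile time, otherwise it is an ordinary 'if'. */
    #if __cplusplus >= 201703L
    # define MY_CONSTEXPR_IF(a_Expr)  constexpr (a_Expr)
    #else
    # define MY_CONSTEXPR_IF(a_Expr)  (a_Expr)
    #endif

    template<uint32_t const a_uImm32>
    unsigned countMovInstrs()
    {
        /* Same shape as the iemNativeEmitLoadGpr32ImmEx() branches in the diff:
           pick the cheapest encoding based on the compile-time immediate. */
        if MY_CONSTEXPR_IF((a_uImm32 >> 16) == 0)
            return 1;   /* movz */
        else if MY_CONSTEXPR_IF((a_uImm32 & UINT32_C(0xffff)) == 0)
            return 1;   /* movz ..., lsl #16 */
        else
            return 2;   /* movz + movk (simplified; the real emitter also considers movn) */
    }

    int main()
    {
        std::printf("%u %u %u\n", countMovInstrs<0x1234>(), countMovInstrs<0x12340000>(), countMovInstrs<0x12345678>());
        return 0;
    }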