- Timestamp: Jan 8, 2024 11:59:25 AM (13 months ago)
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
(r102430 → r102785)

@@ -4985,8 +4985,26 @@
         case IEMMODE_32BIT:
         {
-            uint32_t u32Value;
-            rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
-            if (rcStrict == VINF_SUCCESS)
-                rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u32Value);
+            /* Modern Intel CPUs only do a WORD-sized access here, as far as both
+               segmentation and paging are concerned.  So, we have to emulate
+               this to make bs3-cpu-weird-1 happy. */
+            if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
+            {
+                /* We don't have flexible enough stack primitives here, so just
+                   do a word pop and add two bytes to SP/RSP on success. */
+                uint16_t uSel;
+                rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
+                if (rcStrict == VINF_SUCCESS)
+                {
+                    iemRegAddToRspEx(pVCpu, &TmpRsp, sizeof(uint32_t) - sizeof(uint16_t));
+                    rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
+                }
+            }
+            else
+            {
+                uint32_t u32Value;
+                rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
+                if (rcStrict == VINF_SUCCESS)
+                    rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u32Value);
+            }
             break;
         }

@@ -4994,8 +5012,22 @@
         case IEMMODE_64BIT:
        {
-            uint64_t u64Value;
-            rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
-            if (rcStrict == VINF_SUCCESS)
-                rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u64Value);
+            /* As in the 32-bit case above, Intel only does a WORD access. */
+            if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
+            {
+                uint16_t uSel;
+                rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
+                if (rcStrict == VINF_SUCCESS)
+                {
+                    iemRegAddToRspEx(pVCpu, &TmpRsp, sizeof(uint64_t) - sizeof(uint16_t));
+                    rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
+                }
+            }
+            else
+            {
+                uint64_t u64Value;
+                rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
+                if (rcStrict == VINF_SUCCESS)
+                    rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u64Value);
+            }
            break;
        }
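What this change emulates, in plain terms: on modern Intel CPUs a POP into a segment register with a 32-bit or 64-bit operand size only performs a WORD-sized memory access (for both segmentation and paging purposes), yet SP/RSP still advances by the full operand size. A minimal sketch of that rule, with hypothetical guestReadU16()/loadSReg() helpers standing in for the real IEM primitives:

    #include <stdint.h>

    extern uint16_t guestReadU16(uint32_t esp);                 /* hypothetical */
    extern void     loadSReg(unsigned iSegReg, uint16_t uSel);  /* hypothetical */

    /* Sketch (not VirtualBox code): Intel-observed POP Sreg with a 32-bit
       operand size. */
    static void popSReg32OnIntel(uint32_t *pEsp, unsigned iSegReg)
    {
        uint16_t uSel = guestReadU16(*pEsp); /* only 2 bytes accessed, for both
                                                segmentation and paging checks */
        *pEsp += sizeof(uint32_t);           /* ...but ESP still advances by 4 */
        loadSReg(iSegReg, uSel);             /* low 16 bits become the selector */
    }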
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h
(r102766 → r102785)

@@ -489,21 +489,45 @@
 # endif

-    /* The intel docs talk about zero extending the selector register
-       value.  My actual intel CPU here might be zero extending the value
-       but it still only writes the lower word... */
-    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
-     * happens when crossing an electric page boundary, is the high word checked
-     * for write accessibility or not?  Probably it is.  What about segment limits?
-     * It appears this behavior is also shared with trap error codes.
-     *
-     * Docs indicate the behavior changed maybe in Pentium or Pentium Pro.  Check
-     * ancient hardware when it actually did change. */
-    uint8_t bUnmapInfo;
-    uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrMem,
-                                               IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
-    *puDst = (uint16_t)uValue;
-    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
-
-    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrMem, uValue));
+    /* bs3-cpu-weird-1 explores this instruction.  The AMD 3990X does it by the
+       book, with a zero-extended DWORD write.  My Intel 10980XE, however, goes
+       all weird in real mode, where it writes a DWORD with the top word of
+       EFLAGS in the top half.  In all other modes it does a WORD access. */
+
+    /** @todo Docs indicate the behavior changed maybe in Pentium or Pentium Pro.
+     *        Check ancient hardware when it actually did change. */
+    uint8_t bUnmapInfo;
+    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
+    {
+        if (!IEM_IS_REAL_MODE(pVCpu))
+        {
+            /* WORD per intel specs. */
+            uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrMem,
+                                                       IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
+            *puDst = (uint16_t)uValue;
+            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
+            Log12(("IEM WR 'word' SS|%RGv: %#06x [sreg/i]\n", GCPtrMem, (uint16_t)uValue));
+        }
+        else
+        {
+            /* DWORD real-mode weirdness observed on the 10980XE. */
+            /** @todo Check this on other intel CPUs and when pushing registers other
+             *        than FS (which is all bs3-cpu-weird-1 does atm).  (Maybe this is
+             *        something for the CPU profile...  Hope not.) */
+            uint32_t *puDst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
+                                                       IEM_ACCESS_STACK_W, sizeof(uint32_t) - 1);
+            *puDst = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
+            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
+            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg/ir]\n", GCPtrMem, uValue));
+        }
+    }
+    else
+    {
+        /* DWORD per spec. */
+        uint32_t *puDst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
+                                                   IEM_ACCESS_STACK_W, sizeof(uint32_t) - 1);
+        *puDst = uValue;
+        iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
+        Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrMem, uValue));
+    }
 }
 # endif /* TMPL_WITH_PUSH_SREG */
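The fallback above reduces to three store variants. Assuming the commit's own description of the observed hardware behavior, they can be summarized like this (an illustrative sketch; writeU16()/writeU32() are hypothetical stand-ins for the map/store/commit sequence, and fRazMask for X86_EFL_RAZ_MASK):

    #include <stdbool.h>
    #include <stdint.h>

    extern void writeU16(uint64_t GCPtrMem, uint16_t uVal);  /* hypothetical */
    extern void writeU32(uint64_t GCPtrMem, uint32_t uVal);  /* hypothetical */

    /* Sketch of the three PUSH Sreg store variants in the fallback. */
    static void pushSRegStore(bool fIsIntel, bool fRealMode, uint64_t GCPtrMem,
                              uint32_t uValue /* zero-extended selector */,
                              uint32_t fEfl, uint32_t fRazMask)
    {
        if (fIsIntel && !fRealMode)
            writeU16(GCPtrMem, (uint16_t)uValue);          /* WORD, per Intel docs  */
        else if (fIsIntel)                                 /* real-mode quirk       */
            writeU32(GCPtrMem,   (uint16_t)uValue          /* selector in low word  */
                               | (fEfl & (UINT32_C(0xffff0000) & ~fRazMask)));
        else
            writeU32(GCPtrMem, uValue);                    /* zero-extended DWORD   */
    }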
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h
(r102766 → r102785)

@@ -1097,4 +1097,8 @@
 {
 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+    /* See fallback for details on this weirdness: */
+    bool const    fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
+    uint8_t const cbAccess = fIsIntel && !IEM_IS_REAL_MODE(pVCpu) ? sizeof(uint16_t) : sizeof(TMPL_MEM_TYPE);
+
     /*
      * Decrement the stack pointer (prep), apply segmentation and check that

@@ -1103,8 +1107,10 @@
     uint64_t uNewRsp;
     RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
-    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
-# if TMPL_MEM_TYPE_SIZE > 1
-    if (RT_LIKELY(   !(GCPtrEff & (sizeof(uint16_t) - 1U))
-                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
+    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, cbAccess, GCPtrTop);
+# if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY(   !(GCPtrEff & (cbAccess - 1U))
+                  || (   cbAccess == sizeof(uint16_t)
+                      ?  TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t)
+                      :  TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ) ))
 # endif
     {

@@ -1133,7 +1139,25 @@
         Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
         Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
-        Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
-                                   GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
-        *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
+        if (cbAccess == sizeof(uint16_t))
+        {
+            Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR 'word' SS|%RGv (%RX64->%RX64): %#06x [sreg/i]\n",
+                                       GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, (uint16_t)uValue));
+            *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
+        }
+        else
+        {
+            TMPL_MEM_TYPE * const puSlot = (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
+            if (fIsIntel)
+            {
+                Assert(IEM_IS_REAL_MODE(pVCpu));
+                uValue = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
+                Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg/ir]\n",
+                                           GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
+            }
+            else
+                Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
+                                           GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
+            *puSlot = uValue;
+        }
         pVCpu->cpum.GstCtx.rsp = uNewRsp;
         return;

@@ -1287,9 +1311,15 @@
 {
 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+    /* See fallback for details on this weirdness: */
+    bool const    fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
+    uint8_t const cbAccess = fIsIntel && !IEM_IS_REAL_MODE(pVCpu) ? sizeof(uint16_t) : sizeof(TMPL_MEM_TYPE);
+
     /*
      * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
      */
     uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
-    if (RT_LIKELY(   !(uNewEsp & (sizeof(uint16_t) - 1))
-                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t) ))
+    if (RT_LIKELY(   !(uNewEsp & (cbAccess - 1))
+                  || (   cbAccess == sizeof(uint16_t)
+                      ?  TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t)
+                      :  TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, TMPL_MEM_TYPE) ) ))
     {

@@ -1318,7 +1348,25 @@
         Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
         Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
-        Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
-                                   uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
-        *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
+        if (cbAccess == sizeof(uint16_t))
+        {
+            Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR 'word' SS|%RX32 (<-%RX32): %#06x [sreg/i]\n",
+                                       uNewEsp, pVCpu->cpum.GstCtx.esp, (uint16_t)uValue));
+            *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
+        }
+        else
+        {
+            TMPL_MEM_TYPE * const puSlot = (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK];
+            if (fIsIntel)
+            {
+                Assert(IEM_IS_REAL_MODE(pVCpu));
+                uValue = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
+                Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg/ir]\n",
+                                           uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
+            }
+            else
+                Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
+                                           uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
+            *puSlot = uValue;
+        }
         pVCpu->cpum.GstCtx.rsp = uNewEsp;
         return;
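Both inlined fast paths gate on the same condition: the store must be naturally aligned for the actual access size, or the chosen TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK() variant must confirm the unaligned access is acceptable. A rough paraphrase of what the guard establishes (the real macro also honors per-type alignment rules, so treat this as an approximation; the page constants are stand-ins):

    #include <stdbool.h>
    #include <stdint.h>

    #define MY_PAGE_SIZE        UINT32_C(0x1000)    /* stand-in for GUEST_PAGE_SIZE        */
    #define MY_PAGE_OFFSET_MASK (MY_PAGE_SIZE - 1)  /* stand-in for GUEST_PAGE_OFFSET_MASK */

    /* Approximate meaning of the inlined fast-path guard: aligned for the
       actual access size, or at least not crossing a page boundary. */
    static bool isFastPathOk(uint32_t uNewEsp, uint8_t cbAccess)
    {
        return !(uNewEsp & (cbAccess - 1U))
            || (uNewEsp & MY_PAGE_OFFSET_MASK) + cbAccess <= MY_PAGE_SIZE;
    }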
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
(r102770 → r102785)

@@ -2589,4 +2589,7 @@
     pReNative->cDbgInfoAlloc  = _16K;
 #endif
+
+    /* Other constant stuff: */
+    pReNative->pVCpu          = pVCpu;

     /*

@@ -11578,6 +11581,9 @@
      */
     uint8_t const cbMem           = RT_BYTE1(cBitsVarAndFlat) / 8;
+    bool const    fIsSegReg       = RT_BYTE3(cBitsVarAndFlat) != 0;
+    bool const    fIsIntelSeg     = fIsSegReg && IEM_IS_GUEST_CPU_INTEL(pReNative->pVCpu);
+    uint8_t const cbMemAccess     = !fIsIntelSeg || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_16BIT
+                                  ? cbMem : sizeof(uint16_t);
     uint8_t const cBitsFlat       = RT_BYTE2(cBitsVarAndFlat); RT_NOREF(cBitsFlat);
-    bool const    fIsSegReg       = RT_BYTE3(cBitsVarAndFlat) != 0; RT_NOREF(fIsSegReg);
     uint8_t const idxRegRsp       = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xSP),
                                                                     kIemNativeGstRegUse_ForUpdate, true /*fNoVolatileRegs*/);

@@ -11627,5 +11633,5 @@
      */
     uint8_t const iSegReg = cBitsFlat != 0 ? UINT8_MAX : X86_SREG_SS;
-    IEMNATIVEEMITTLBSTATE const TlbState(pReNative, idxRegEffSp, &off, iSegReg, cbMem);
+    IEMNATIVEEMITTLBSTATE const TlbState(pReNative, idxRegEffSp, &off, iSegReg, cbMemAccess);
     uint16_t const uTlbSeqNo         = pReNative->uTlbSeqNo++;
     uint32_t const idxLabelTlbMiss   = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, UINT32_MAX, uTlbSeqNo);

@@ -11733,4 +11739,4 @@
      * TlbLookup:
      */
-    off = iemNativeEmitTlbLookup(pReNative, off, &TlbState, iSegReg, cbMem, cbMem - 1, IEM_ACCESS_TYPE_WRITE,
+    off = iemNativeEmitTlbLookup(pReNative, off, &TlbState, iSegReg, cbMemAccess, cbMemAccess - 1, IEM_ACCESS_TYPE_WRITE,
                                  idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);

@@ -11742,5 +11748,5 @@
     if (idxRegValue != UINT8_MAX)
     {
-        switch (cbMem)
+        switch (cbMemAccess)
         {
             case 2:

@@ -11748,8 +11754,37 @@
                 break;
             case 4:
-                if (!fIsSegReg)
+                if (!fIsIntelSeg)
                     off = iemNativeEmitStoreGpr32ByGprEx(pCodeBuf, off, idxRegValue, idxRegMemResult);
                 else
-                    off = iemNativeEmitStoreGpr16ByGprEx(pCodeBuf, off, idxRegValue, idxRegMemResult);
+                {
+                    /* Intel real mode segment push.  The 10980XE adds the 2nd half of EFLAGS
+                       to a PUSH FS in real mode, so we have to try to emulate that here.
+                       We borrow the now unused idxReg1 from the TLB lookup code here. */
+                    uint8_t idxRegEfl = iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(pReNative, &off,
+                                                                                        kIemNativeGstReg_EFlags);
+                    if (idxRegEfl != UINT8_MAX)
+                    {
+#ifdef RT_ARCH_AMD64
+                        off = iemNativeEmitLoadGprFromGpr32(pReNative, off, TlbState.idxReg1, idxRegEfl);
+                        off = iemNativeEmitAndGpr32ByImm(pReNative, off, TlbState.idxReg1,
+                                                         UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK);
+#else
+                        off = iemNativeEmitGpr32EqGprAndImmEx(iemNativeInstrBufEnsure(pReNative, off, 3),
+                                                              off, TlbState.idxReg1, idxRegEfl,
+                                                              UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK);
+#endif
+                        iemNativeRegFreeTmp(pReNative, idxRegEfl);
+                    }
+                    else
+                    {
+                        off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, TlbState.idxReg1,
+                                                              RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.eflags));
+                        off = iemNativeEmitAndGpr32ByImm(pReNative, off, TlbState.idxReg1,
+                                                         UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK);
+                    }
+                    /* ASSUMES the upper half of idxRegValue is ZERO. */
+                    off = iemNativeEmitOrGpr32ByGpr(pReNative, off, TlbState.idxReg1, idxRegValue);
+                    off = iemNativeEmitStoreGpr32ByGprEx(pCodeBuf, off, TlbState.idxReg1, idxRegMemResult);
+                }
                 break;
             case 8:

@@ -11762,5 +11797,5 @@
     else
     {
-        switch (cbMem)
+        switch (cbMemAccess)
         {
             case 2:

@@ -11854,5 +11889,5 @@
            == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat32PopGRegU16
                : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat32PopGRegU32
-               : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 16, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat64PopGRegU16
+               : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat64PopGRegU16
                : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat64PopGRegU64
                : UINT64_C(0xc000b000a0009000) ));
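On the TLB-hit path, the net effect of the newly emitted host instructions for the Intel real-mode segment push is equivalent to the following (a C rendering of the emitted sequence; variable names are symbolic, not VirtualBox identifiers):

    #include <stdint.h>

    /* C rendering of the emitted sequence: copy EFLAGS, mask its high word,
       OR in the selector, then do a single 32-bit store. */
    static void emittedSegPushStore(uint32_t fEfl, uint32_t fRazMask,
                                    uint32_t uSelValue /* upper half assumed zero */,
                                    uint8_t *pbSlot    /* TLB-resolved stack addr */)
    {
        uint32_t uTmp = fEfl & (UINT32_C(0xffff0000) & ~fRazMask); /* and   */
        uTmp |= uSelValue;                                         /* or    */
        *(uint32_t *)pbSlot = uTmp;                                /* store */
    }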
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
(r102765 → r102785)

@@ -683,4 +683,6 @@
     /** The translation block being recompiled. */
     PCIEMTB                     pTbOrg;
+    /** The VMCPU structure of the EMT. */
+    PVMCPUCC                    pVCpu;

     /** Condition sequence number (for generating unique labels). */
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h
(r102765 → r102785)

@@ -3912,5 +3912,5 @@
  * Emits code for OR'ing two 64-bit GPRs.
  */
-DECL_INLINE_THROW(uint32_t)
+DECL_FORCE_INLINE(uint32_t)
 iemNativeEmitOrGprByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
 {

@@ -3927,4 +3927,47 @@
 # error "Port me"
 #endif
+    return off;
+}
+
+
+/**
+ * Emits code for OR'ing two 32-bit GPRs.
+ * @note Bits 63:32 of the destination GPR will be cleared.
+ */
+DECL_FORCE_INLINE(uint32_t)
+iemNativeEmitOrGpr32ByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
+{
+#if defined(RT_ARCH_AMD64)
+    /* or Gv, Ev */
+    if (iGprDst >= 8 || iGprSrc >= 8)
+        pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
+    pCodeBuf[off++] = 0x0b;
+    pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
+
+#elif defined(RT_ARCH_ARM64)
+    pCodeBuf[off++] = Armv8A64MkInstrOrr(iGprDst, iGprDst, iGprSrc, false /*f64Bit*/);
+
+#else
+# error "Port me"
+#endif
+    return off;
+}
+
+
+/**
+ * Emits code for OR'ing two 32-bit GPRs.
+ * @note Bits 63:32 of the destination GPR will be cleared.
+ */
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitOrGpr32ByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
+{
+#if defined(RT_ARCH_AMD64)
+    off = iemNativeEmitOrGpr32ByGprEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprSrc);
+#elif defined(RT_ARCH_ARM64)
+    off = iemNativeEmitOrGpr32ByGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc);
+#else
+# error "Port me"
+#endif
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     return off;
 }
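For reference, the new 32-bit OR emitter is consumed by the recompiler hunk above; the pairing looks like this (lifted from the change itself):

    /* OR the (zero-extended) selector into the masked EFLAGS copy, then
       store the 32-bit result at the TLB-resolved stack address. */
    off = iemNativeEmitOrGpr32ByGpr(pReNative, off, TlbState.idxReg1, idxRegValue);
    off = iemNativeEmitStoreGpr32ByGprEx(pCodeBuf, off, TlbState.idxReg1, idxRegMemResult);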