Changeset 105877 in vbox
- Timestamp: Aug 27, 2024 11:17:09 PM (3 months ago)
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h (r105856 -> r105877)

@@ -767 +767 @@
  * @param   off         The code buffer offset.
  * @param   idxAddrReg  The host register with the address to check.
- * @param   idxOldPcReg Register holding the old PC that offPc is relative
- *                      to if available, otherwise UINT8_MAX.
+ * @param   offDisp     The relative displacement that has already been
+ *                      added to idxAddrReg and must be subtracted if
+ *                      raising a \#GP(0).
  * @param   idxInstr    The current instruction.
- * @tparam  a_fAbsolute Not sure why we have this yet.
  */
-template<bool const a_fAbsolute>
 DECL_FORCE_INLINE_THROW(uint32_t)
-iemNativeEmitCheckGprCanonicalMaybeRaiseGp0WithOldPc(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                                     uint8_t idxAddrReg, uint8_t idxOldPcReg, uint8_t idxInstr)
+iemNativeEmitCheckGprCanonicalMaybeRaiseGp0WithDisp(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                    uint8_t idxAddrReg, int64_t offDisp, uint8_t idxInstr)
 {
 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
     Assert(pReNative->Core.bmGstRegShadowDirty == 0);
-#endif
-
-#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
-# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
-    if (!pReNative->Core.offPc)
-# endif
-        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
-#else
-    RT_NOREF(idxInstr);
 #endif

@@ -822 +812 @@
 #endif
 
+    /* Jump to the #GP code (hoping static prediction considers forward branches as not-taken). */
+    uint32_t const offFixup1 = off;
+    off = iemNativeEmitJnzToFixed(pReNative, off, off /*8-bit jump suffices*/);
+
+    /* jump .Lnoexcept;  Skip the #GP code. */
+    uint32_t const offFixup2 = off;
+    off = iemNativeEmitJmpToFixed(pReNative, off, off /*8-bit jump suffices*/);
+
+    /* .Lraisexcpt: */
+    iemNativeFixupFixedJump(pReNative, offFixup1, off);
+#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
+    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr), iTmpReg);
+#else
+    RT_NOREF(idxInstr);
+#endif
+
+    /* Undo the PC adjustment and store the old PC value. */
+    off = iemNativeEmitSubGprImm(pReNative, off, idxAddrReg, offDisp, iTmpReg);
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxAddrReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+
+    off = iemNativeEmitTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0, false /*fActuallyExitingTb*/);
+
+    /* .Lnoexcept: */
+    iemNativeFixupFixedJump(pReNative, offFixup2, off);
+
+    iemNativeRegFreeTmp(pReNative, iTmpReg);
+    return off;
+}
+
+
+/**
+ * Emits code to check if the content of @a idxAddrReg is a canonical address,
+ * raising a \#GP(0) if it isn't.
+ *
+ * Caller makes sure everything is flushed, except maybe PC.
+ *
+ * @returns New code buffer offset, UINT32_MAX on failure.
+ * @param   pReNative   The native recompile state.
+ * @param   off         The code buffer offset.
+ * @param   idxAddrReg  The host register with the address to check.
+ * @param   idxOldPcReg Register holding the old PC that offPc is relative
+ *                      to if available, otherwise UINT8_MAX.
+ * @param   idxInstr    The current instruction.
+ */
+DECL_FORCE_INLINE_THROW(uint32_t)
+iemNativeEmitCheckGprCanonicalMaybeRaiseGp0WithOldPc(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                     uint8_t idxAddrReg, uint8_t idxOldPcReg, uint8_t idxInstr)
+{
+#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
+    Assert(pReNative->Core.bmGstRegShadowDirty == 0);
+#endif
+
+#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
+    if (!pReNative->Core.offPc)
+# endif
+        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
+#else
+    RT_NOREF(idxInstr);
+#endif
+
+#ifdef RT_ARCH_AMD64
+    /*
+     * if ((((uint32_t)(a_u64Addr >> 32) + UINT32_C(0x8000)) >> 16) != 0)
+     *     return raisexcpt();
+     * ---- this variant avoid loading a 64-bit immediate, but is an instruction longer.
+     */
+    uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
+
+    off = iemNativeEmitLoadGprFromGpr(pReNative, off, iTmpReg, idxAddrReg);
+    off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 32);
+    off = iemNativeEmitAddGpr32Imm(pReNative, off, iTmpReg, (int32_t)0x8000);
+    off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 16);
+
+#elif defined(RT_ARCH_ARM64)
+    /*
+     * if ((((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000)) >> 48) != 0)
+     *     return raisexcpt();
+     * ----
+     *     mov     x1, 0x800000000000
+     *     add     x1, x0, x1
+     *     cmp     xzr, x1, lsr 48
+     *     b.ne    .Lraisexcpt
+     */
+    uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
+
+    off = iemNativeEmitLoadGprImm64(pReNative, off, iTmpReg, UINT64_C(0x800000000000));
+    off = iemNativeEmitAddTwoGprs(pReNative, off, iTmpReg, idxAddrReg);
+    off = iemNativeEmitCmpArm64(pReNative, off, ARMV8_A64_REG_XZR, iTmpReg, true /*f64Bit*/, 48 /*cShift*/, kArmv8A64InstrShift_Lsr);
+#else
+# error "Port me"
+#endif
+
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     if (pReNative->Core.offPc)

@@ -832 +915 @@
         off = iemNativeEmitJzToFixed(pReNative, off, off + 16 /*8-bit suffices*/);
 
-        /* Raising a GP(0), but first we need to update cpum.GstCtx.rip. */
+        /* .Lraisexcpt: */
+# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
+        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr), iTmpReg);
+# endif
+        /* We need to update cpum.GstCtx.rip. */
         if (idxOldPcReg == UINT8_MAX)
         {

@@ -840 +927 @@
         off = iemNativeEmitAddGprImm(pReNative, off, idxOldPcReg, pReNative->Core.offPc);
         off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxOldPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
-# ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
-        off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
-# endif
+
         off = iemNativeEmitTbExit(pReNative, off, kIemNativeLabelType_RaiseGp0, false /*fActuallyExitingTb*/);
         iemNativeFixupFixedJump(pReNative, offFixup, off);

@@ -911 +996 @@
  *                      to if available, otherwise UINT8_MAX.
  * @param   idxInstr    The current instruction.
- * @tparam  a_fAbsolute Not sure why we have this yet.
  */
-template<bool const a_fAbsolute>
 DECL_FORCE_INLINE_THROW(uint32_t)
 iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0WithOldPc(PIEMRECOMPILERSTATE pReNative, uint32_t off,

@@ -1049 +1132 @@
     if (a_fWithinPage && enmEffOpSize == IEMMODE_64BIT)
     {
+        /* No #GP checking required, just update offPc and get on with it. */
         pReNative->Core.offPc += (int64_t)offDisp + cbInstr;
 # ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG

@@ -1057 +1141 @@
 #endif
     {
-        /* We speculatively modify PC and may raise #GP(0), so make sure the right values are in CPUMCTX. */
-        off = iemNativeRegFlushPendingWrites(pReNative, off);
-#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
-        Assert(pReNative->Core.offPc == 0);
-#endif
+        /* Flush all but PC iff we're doing a 64-bit update here and this isn't within a page.. */
+        if (RT_LIKELY(enmEffOpSize == IEMMODE_64BIT && !a_fWithinPage))
+            off = iemNativeRegFlushPendingWrites(pReNative, off, RT_BIT_64(kIemNativeGstReg_Pc) /*fGstShwExcept*/);
+
         /* Allocate a temporary PC register. */
-        uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
+        uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
+                                                                 kIemNativeGstRegUse_ForUpdate);
 
         /* Perform the addition. */

@@ -1073 +1157 @@
            We can skip this if the target is within the same page. */
         if (!a_fWithinPage)
-            off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
+            off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0WithDisp(pReNative, off, idxPcReg,
+                                                                      (int64_t)offDisp + cbInstr, idxInstr);
     }
     else

@@ -1081 +1166 @@
         off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
     }
-#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     off = iemNativeEmitPcDebugAdd(pReNative, off, (int64_t)offDisp + cbInstr, enmEffOpSize == IEMMODE_64BIT ? 64 : 16);
     off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
+# endif
+    /* Since we've already got the new PC value in idxPcReg, we can just as
+       well write it out and reset offPc to zero.  Otherwise, we'd need to use
+       a copy the shadow PC, which will cost another move instruction here. */
+    uint8_t const  idxOldInstrPlusOne = pReNative->idxInstrPlusOneOfLastPcUpdate;
+    pReNative->idxInstrPlusOneOfLastPcUpdate = RT_MAX(idxInstr + 1, idxOldInstrPlusOne);
+    uint8_t const  cInstrsSkipped      = idxInstr <= idxOldInstrPlusOne ? 0 : idxInstr - idxOldInstrPlusOne;
+    Log4(("iemNativeEmitRip64RelativeJumpAndFinishingNoFlags: offPc=%#RX64 -> 0; off=%#x; idxInstr=%u cInstrsSkipped=%u cCondDepth=%d\n",
+          pReNative->Core.offPc, off, idxInstr, cInstrsSkipped, pReNative->cCondDepth));
+    STAM_COUNTER_ADD(&pReNative->pVCpu->iem.s.StatNativePcUpdateDelayed, cInstrsSkipped);
+# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
+    iemNativeDbgInfoAddNativeOffset(pReNative, off);
+    iemNativeDbgInfoAddDelayedPcUpdate(pReNative, pReNative->Core.offPc, cInstrsSkipped);
+# endif
+    pReNative->Core.offPc = 0;
 #endif

@@ -1380 +1482 @@
     {
         if (f64Bit)
-            off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0WithOldPc<true>(pReNative, off, idxPcReg, idxOldPcReg, idxInstr);
+            off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0WithOldPc(pReNative, off, idxPcReg, idxOldPcReg, idxInstr);
         else
-            off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0WithOldPc<true>(pReNative, off, idxPcReg,
-                                                                                       idxOldPcReg, idxInstr);
+            off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0WithOldPc(pReNative, off, idxPcReg, idxOldPcReg, idxInstr);
     }
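For readers unfamiliar with the trick in the emitted checks above: a 64-bit address is canonical when bits 63:48 are a sign-extension of bit 47, and both the AMD64 and ARM64 sequences test this by adding 2^47 (or 2^15 to the high dword) and verifying that nothing spills into the upper bits. The standalone sketch below illustrates the arithmetic; it is not part of the changeset.

/* Standalone C++ sketch (not VirtualBox code) of the canonical-address test
   the recompiler emits above.  Adding 2^47 folds both canonical halves of the
   address space into the low 48 bits, so any non-zero bits left in 63:48 mean
   the address is non-canonical and #GP(0) must be raised. */
#include <cstdint>
#include <cassert>

static bool IsCanonicalArm64Style(uint64_t uAddr)   /* add 2^47, test bits 63:48 */
{
    return ((uAddr + UINT64_C(0x800000000000)) >> 48) == 0;
}

static bool IsCanonicalAmd64Style(uint64_t uAddr)   /* same test on the high dword only */
{
    return (((uint32_t)(uAddr >> 32) + UINT32_C(0x8000)) >> 16) == 0;
}

int main()
{
    uint64_t const auTests[] = { UINT64_C(0x00007fffffffffff),   /* last canonical low address   */
                                 UINT64_C(0xffff800000000000),   /* first canonical high address */
                                 UINT64_C(0x0000800000000000) }; /* non-canonical                */
    for (uint64_t uAddr : auTests)
        assert(IsCanonicalArm64Style(uAddr) == IsCanonicalAmd64Style(uAddr));
    assert(!IsCanonicalArm64Style(UINT64_C(0x0000800000000000)));
    return 0;
}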
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (r105856 -> r105877)

@@ -2072 +2072 @@
     pReNative->Core.offPc = 0;
 # if defined(IEMNATIVE_WITH_TB_DEBUG_INFO) || defined(VBOX_WITH_STATISTICS)
-    pReNative->Core.idxInstrPlusOneOfLastPcUpdate = 0;
+    pReNative->idxInstrPlusOneOfLastPcUpdate = 0;
 # endif
 # ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG

@@ -2722 +2722 @@
     PIEMTBDBGENTRY const pEntry = iemNativeDbgInfoAddNewEntry(pReNative, pReNative->pDbgInfo);
     pEntry->DelayedPcUpdate.uType         = kIemTbDbgEntryType_DelayedPcUpdate;
+    pEntry->DelayedPcUpdate.cInstrSkipped = cInstrSkipped;
     pEntry->DelayedPcUpdate.offPc         = offPc; /** @todo support larger values */
-    pEntry->DelayedPcUpdate.cInstrSkipped = cInstrSkipped;
 }

@@ -5768 +5768 @@
     Log4(("iemNativeEmitPcWritebackSlow: offPc=%#RX64 -> 0; off=%#x\n", pReNative->Core.offPc, off));
 # else
-    uint8_t const idxOldInstrPlusOne = pReNative->Core.idxInstrPlusOneOfLastPcUpdate;
+    uint8_t const idxOldInstrPlusOne = pReNative->idxInstrPlusOneOfLastPcUpdate;
     uint8_t       idxCurCall         = pReNative->idxCurCall;
     uint8_t       idxInstr           = pReNative->pTbOrg->Thrd.paCalls[idxCurCall].idxInstr; /* unreliable*/
     while (idxInstr == 0 && idxInstr + 1 < idxOldInstrPlusOne && idxCurCall > 0)
         idxInstr = pReNative->pTbOrg->Thrd.paCalls[--idxCurCall].idxInstr;
-    uint8_t const cInstrsSkipped     = idxInstr <= pReNative->Core.idxInstrPlusOneOfLastPcUpdate ? 0
-                                     : idxInstr - pReNative->Core.idxInstrPlusOneOfLastPcUpdate;
+    pReNative->idxInstrPlusOneOfLastPcUpdate = RT_MAX(idxInstr + 1, idxOldInstrPlusOne);
+    uint8_t const cInstrsSkipped     = idxInstr <= idxOldInstrPlusOne ? 0 : idxInstr - idxOldInstrPlusOne;
     Log4(("iemNativeEmitPcWritebackSlow: offPc=%#RX64 -> 0; off=%#x; idxInstr=%u cInstrsSkipped=%u\n",
           pReNative->Core.offPc, off, idxInstr, cInstrsSkipped));
 
-    pReNative->Core.idxInstrPlusOneOfLastPcUpdate = RT_MAX(idxInstr + 1, pReNative->Core.idxInstrPlusOneOfLastPcUpdate);
     STAM_COUNTER_ADD(&pReNative->pVCpu->iem.s.StatNativePcUpdateDelayed, cInstrsSkipped);
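The reordered statistics code above reads the previous idxInstr+1 watermark into a local, advances the stored value, and only then derives how many instructions had their PC update postponed. A toy illustration of that bookkeeping follows; the state and helper are simplified stand-ins, not the recompiler's actual structures.

/* Toy illustration (not VirtualBox code) of the skipped-instruction
   accounting used by the slow PC writeback path above. */
#include <cstdint>
#include <algorithm>
#include <cassert>

static uint8_t g_idxInstrPlusOneOfLastPcUpdate = 0;   /* watermark: idxInstr + 1 of the last update */

static uint8_t RecordPcUpdate(uint8_t idxInstr)
{
    uint8_t const idxOldInstrPlusOne = g_idxInstrPlusOneOfLastPcUpdate;            /* read the old value first */
    g_idxInstrPlusOneOfLastPcUpdate  = std::max<uint8_t>(idxInstr + 1, idxOldInstrPlusOne);
    return idxInstr <= idxOldInstrPlusOne ? 0 : idxInstr - idxOldInstrPlusOne;     /* instructions skipped     */
}

int main()
{
    assert(RecordPcUpdate(0) == 0);   /* first instruction in the TB: nothing was delayed     */
    assert(RecordPcUpdate(4) == 3);   /* instructions 1..3 ran with the PC update postponed   */
    assert(RecordPcUpdate(5) == 0);   /* directly follows the last update: nothing skipped    */
    return 0;
}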
trunk/src/VBox/VMM/include/IEMInternal.h (r105853 -> r105877)

@@ -1448 +1448 @@
         /* kIemTbDbgEntryType_DelayedPcUpdate. */
         uint32_t uType : 4;
+        /** Number of instructions skipped. */
+        uint32_t cInstrSkipped : 8;
         /* The instruction offset added to the program counter. */
-        uint32_t offPc : 14;
-        /** Number of instructions skipped. */
-        uint32_t cInstrSkipped : 14;
+        int32_t  offPc : 20;
     } DelayedPcUpdate;
 #endif
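The repacked debug-info entry above still occupies a single 32-bit word (4 + 8 + 20 bits), and offPc becomes a signed bit-field so that negative displacements can be recorded. A minimal sketch of that layout, using made-up values, is given below.

/* Minimal sketch of the repacked DelayedPcUpdate layout (field names mirror
   the diff; the values below are made up for illustration).  The three
   bit-fields still add up to exactly one 32-bit word, and the now-signed
   offPc field can hold negative PC displacements. */
#include <cstdint>
#include <cassert>

struct DelayedPcUpdateEntry
{
    uint32_t uType         : 4;    /* kIemTbDbgEntryType_DelayedPcUpdate    */
    uint32_t cInstrSkipped : 8;    /* number of instructions skipped        */
    int32_t  offPc         : 20;   /* signed displacement, -524288..+524287 */
};

int main()
{
    static_assert(sizeof(DelayedPcUpdateEntry) == sizeof(uint32_t), "must remain one debug-info word");
    DelayedPcUpdateEntry Entry = { 1, 3, -16 };   /* placeholder type value, 3 skipped, backward jump */
    assert(Entry.cInstrSkipped == 3 && Entry.offPc == -16);
    return 0;
}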
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h (r105863 -> r105877)

@@ -1258 +1258 @@
      * as long as possible. */
     int64_t             offPc;
-# if defined(IEMNATIVE_WITH_TB_DEBUG_INFO) || defined(VBOX_WITH_STATISTICS)
-    /** Statistics: The idxInstr+1 value at the last PC update. */
-    uint8_t             idxInstrPlusOneOfLastPcUpdate;
-# endif
 # ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     /** Set after we've loaded PC into uPcUpdatingDebug at the first update. */

@@ -1463 +1459 @@
 #endif
 
+#if defined(IEMNATIVE_WITH_TB_DEBUG_INFO) || defined(VBOX_WITH_STATISTICS)
+    /** Statistics: The idxInstr+1 value at the last PC update. */
+    uint8_t                     idxInstrPlusOneOfLastPcUpdate;
+#endif
+
 #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
     /** Number of debug info entries allocated for pDbgInfo. */
     uint32_t                    cDbgInfoAlloc;
-    uint32_t                    uPadding;
     /** Debug info. */
     PIEMTBDBG                   pDbgInfo;
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h (r105856 -> r105877)

@@ -1059 +1059 @@
  */
 DECL_INLINE_THROW(uint32_t)
-iemNativeEmitStoreImmToVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t bImm, uint32_t offVCpu)
+iemNativeEmitStoreImmToVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t bImm, uint32_t offVCpu,
+                              uint8_t idxRegTmp = UINT8_MAX)
 {
 #ifdef RT_ARCH_AMD64

@@ -1068 +1069 @@
     pbCodeBuf[off++] = bImm;
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+    RT_NOREF(idxRegTmp);
 
 #elif defined(RT_ARCH_ARM64)
     /* Cannot use IEMNATIVE_REG_FIXED_TMP0 for the immediate as that's used by iemNativeEmitGprByVCpuLdSt. */
-    uint8_t const idxRegImm = iemNativeRegAllocTmpImm(pReNative, &off, bImm);
-    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, idxRegImm, offVCpu, kArmv8A64InstrLdStType_St_Byte, sizeof(uint8_t));
-    iemNativeRegFreeTmpImm(pReNative, idxRegImm);
+    if (idxRegTmp != UINT8_MAX)
+    {
+        Assert(idxRegTmp != IEMNATIVE_REG_FIXED_TMP0);
+        off = iemNativeEmitLoadGprImm32(pReNative, off, idxRegTmp, bImm);
+        off = iemNativeEmitGprByVCpuLdSt(pReNative, off, idxRegTmp, offVCpu, kArmv8A64InstrLdStType_St_Byte, sizeof(uint8_t));
+    }
+    else
+    {
+        uint8_t const idxRegImm = iemNativeRegAllocTmpImm(pReNative, &off, bImm);
+        off = iemNativeEmitGprByVCpuLdSt(pReNative, off, idxRegImm, offVCpu, kArmv8A64InstrLdStType_St_Byte, sizeof(uint8_t));
+        iemNativeRegFreeTmpImm(pReNative, idxRegImm);
+    }
 
 #else
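On ARM64 the byte-store helper above needs a register to hold the immediate; the new defaulted idxRegTmp parameter lets callers that already own a scratch register (such as iTmpReg in the #GP paths earlier in this changeset) pass it in instead of forcing a second allocation, while the AMD64 encoding simply ignores it. The sketch below shows the general calling pattern with simplified, hypothetical helpers rather than the real emitter API.

/* Hypothetical, simplified sketch of the optional scratch-register pattern;
   allocTmpReg/freeTmpReg/emitStoreByte are stand-ins, not VirtualBox APIs. */
#include <cstdint>

static constexpr uint8_t kNoTmpReg = UINT8_MAX;

static uint8_t  allocTmpReg()                                                      { return 3; }
static void     freeTmpReg(uint8_t /*idxReg*/)                                     {}
static uint32_t emitStoreByte(uint32_t off, uint8_t /*bImm*/, uint8_t /*idxReg*/)  { return off + 1; }

static uint32_t emitStoreImmU8(uint32_t off, uint8_t bImm, uint8_t idxRegTmp = kNoTmpReg)
{
    if (idxRegTmp != kNoTmpReg)                 /* caller-supplied scratch register: no allocation */
        return emitStoreByte(off, bImm, idxRegTmp);
    uint8_t const idxReg = allocTmpReg();       /* default path: allocate and free a temporary     */
    off = emitStoreByte(off, bImm, idxReg);
    freeTmpReg(idxReg);
    return off;
}

int main()
{
    uint32_t off = 0;
    uint8_t const iTmpReg = allocTmpReg();      /* e.g. an exception path that already holds a temp */
    off = emitStoreImmU8(off, 42, iTmpReg);     /* reuses iTmpReg                                    */
    off = emitStoreImmU8(off, 7);               /* falls back to internal allocation                 */
    freeTmpReg(iTmpReg);
    return off == 2 ? 0 : 1;
}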