Changeset 108245 in vbox for trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
- Timestamp: Feb 17, 2025, 12:13:38 AM (2 months ago)
- svn:sync-xref-src-repo-rev: 167566
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r108244 r108245 177 177 size_t g_cbIemWrote; 178 178 #endif 179 180 181 /**182 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code183 * path.184 *185 * This will also invalidate TLB entries for any pages with active data186 * breakpoints on them.187 *188 * @returns IEM_F_BRK_PENDING_XXX or zero.189 * @param pVCpu The cross context virtual CPU structure of the190 * calling thread.191 *192 * @note Don't call directly, use iemCalcExecDbgFlags instead.193 */194 uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)195 {196 uint32_t fExec = 0;197 198 /*199 * Helper for invalidate the data TLB for breakpoint addresses.200 *201 * This is to make sure any access to the page will always trigger a TLB202 * load for as long as the breakpoint is enabled.203 */204 #ifdef IEM_WITH_DATA_TLB205 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \206 RTGCPTR uTagNoRev = (a_uValue); \207 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \208 /** @todo do large page accounting */ \209 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \210 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \211 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \212 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \213 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \214 } while (0)215 #else216 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)217 #endif218 219 /*220 * Process guest breakpoints.221 */222 #define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \223 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \224 { \225 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \226 { \227 case X86_DR7_RW_EO: \228 fExec |= IEM_F_PENDING_BRK_INSTR; \229 break; \230 case X86_DR7_RW_WO: \231 case X86_DR7_RW_RW: \232 fExec |= IEM_F_PENDING_BRK_DATA; \233 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \234 break; \235 case X86_DR7_RW_IO: \236 fExec |= IEM_F_PENDING_BRK_X86_IO; \237 break; \238 } \239 } 
\240 } while (0)241 242 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];243 if (fGstDr7 & X86_DR7_ENABLED_MASK)244 {245 /** @todo extract more details here to simplify matching later. */246 #ifdef IEM_WITH_DATA_TLB247 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);248 #endif249 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);250 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);251 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);252 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);253 }254 255 /*256 * Process hypervisor breakpoints.257 */258 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);259 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);260 if (fHyperDr7 & X86_DR7_ENABLED_MASK)261 {262 /** @todo extract more details here to simplify matching later. */263 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));264 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));265 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));266 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));267 }268 269 return fExec;270 }271 179 272 180 … … 471 379 472 380 473 474 381 /** 475 382 * Prefetch opcodes the first time when starting executing. 
/** @name Register Access.
 * @{
 */

/**
 * Adds a 8-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                        IEMMODE enmEffOpSize) RT_NOEXCEPT
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            /* uint16_t arithmetic: IP wraps at 64 KiB by construction. */
            uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
            if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
                          || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
                pVCpu->cpum.GstCtx.rip = uNewIp;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }

        case IEMMODE_32BIT:
        {
            Assert(!IEM_IS_64BIT_CODE(pVCpu));
            Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);

            uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
            if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
                pVCpu->cpum.GstCtx.rip = uNewEip;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }

        case IEMMODE_64BIT:
        {
            Assert(IEM_IS_64BIT_CODE(pVCpu));

            uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
            if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
                pVCpu->cpum.GstCtx.rip = uNewRip;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    /* NOTE(review): this uses cbInstr directly where the S16/S32 variants use
       IEM_GET_INSTR_LEN(pVCpu) — presumably equivalent here; confirm. */
    pVCpu->iem.s.cbOpcode = cbInstr;
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}


/**
 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
{
    /* A 16-bit relative offset implies a 16-bit effective operand size. */
    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);

    /* uint16_t arithmetic: IP wraps at 64 KiB by construction. */
    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
                  || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
        pVCpu->cpum.GstCtx.rip = uNewIp;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}


/**
 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
                                                         IEMMODE enmEffOpSize) RT_NOEXCEPT
{
    /* A 32-bit relative offset is only encodable with 32- or 64-bit operand size. */
    if (enmEffOpSize == IEMMODE_32BIT)
    {
        Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));

        uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
        if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
            pVCpu->cpum.GstCtx.rip = uNewEip;
        else
            return iemRaiseGeneralProtectionFault0(pVCpu);
    }
    else
    {
        Assert(enmEffOpSize == IEMMODE_64BIT);

        uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
        if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
            pVCpu->cpum.GstCtx.rip = uNewRip;
        else
            return iemRaiseGeneralProtectionFault0(pVCpu);
    }

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}

/** @} */
Note: See TracChangeset for help on using the changeset viewer.