VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAll-x86.cpp

Last change on this file was 108791, checked in by vboxsync, 11 days ago

VMM/IEM: More ARM target work. jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 10.0 KB
/* $Id: IEMAll-x86.cpp 108791 2025-03-28 21:58:31Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - x86 target, miscellaneous.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/dbgf.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/errcore.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline-x86.h" /* iemRegFinishClearingRF */


/**
 * Calculates IEM_F_PENDING_BRK_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
 * path.
 *
 * This will also invalidate TLB entries for any pages with active data
 * breakpoints on them.
 *
 * @returns IEM_F_PENDING_BRK_XXX or zero.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 *
 * @note    Don't call this directly; use iemCalcExecDbgFlags instead.
 */
uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
{
    uint32_t fExec = 0;

    /*
     * Helper for invalidating the data TLB entry for a breakpoint address.
     *
     * This makes sure any access to the page will always trigger a TLB
     * load for as long as the breakpoint is enabled.
     */
#ifdef IEM_WITH_DATA_TLB
# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
        RTGCPTR uTagNoRev = (a_uValue); \
        uTagNoRev = IEMTLB_CALC_TAG_NO_REV(pVCpu, uTagNoRev); \
        /** @todo do large page accounting */ \
        uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
        if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
            pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
        if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
            pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
    } while (0)
#else
# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
#endif
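
/* Illustration (editor's note, hypothetical address): assuming 4 KiB pages,
 * the tag is derived from the guest address bits above the page offset, so a
 * breakpoint at 0x00007fff12345678 maps to page tag 0x00007fff12345.  Both
 * the even slot (matched against uTlbRevision, non-global entries) and the
 * odd slot (matched against uTlbRevisionGlobal) for that tag are zapped,
 * forcing the next access to the page through a full TLB load where the
 * breakpoint can be re-checked. */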

    /*
     * Process guest breakpoints.
     */
#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
        if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
        { \
            switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
            { \
                case X86_DR7_RW_EO: \
                    fExec |= IEM_F_PENDING_BRK_INSTR; \
                    break; \
                case X86_DR7_RW_WO: \
                case X86_DR7_RW_RW: \
                    fExec |= IEM_F_PENDING_BRK_DATA; \
                    INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
                    break; \
                case X86_DR7_RW_IO: \
                    fExec |= IEM_F_PENDING_BRK_X86_IO; \
                    break; \
            } \
        } \
    } while (0)
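
/* Worked example (editor's note, hypothetical DR7 value): DR7 = 0x000d0001
 * sets L0 (bit 0), RW0 = 01b (break on data writes, bits 16-17) and
 * LEN0 = 11b (4 bytes, bits 18-19).  For that value PROCESS_ONE_BP(fDr7, 0,
 * dr0) takes the X86_DR7_RW_WO branch, ORs IEM_F_PENDING_BRK_DATA into fExec
 * and invalidates the data TLB entry for the page DR0 points at. */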

    uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
    if (fGstDr7 & X86_DR7_ENABLED_MASK)
    {
/** @todo extract more details here to simplify matching later. */
#ifdef IEM_WITH_DATA_TLB
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
#endif
        PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
        PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
        PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
        PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
    }

    /*
     * Process hypervisor breakpoints.
     */
    PVMCC const    pVM       = pVCpu->CTX_SUFF(pVM);
    uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
    if (fHyperDr7 & X86_DR7_ENABLED_MASK)
    {
/** @todo extract more details here to simplify matching later. */
        PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
        PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
        PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
        PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
    }

    return fExec;
}
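
/* Usage sketch (editor's note, hypothetical caller): per the @note above this
 * slow path is meant to be reached via iemCalcExecDbgFlags, which would
 * typically merge the result along the lines of:
 *      pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_PENDING_BRK_MASK)
 *                         | iemCalcExecDbgFlagsSlow(pVCpu);
 */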


/** @name   Register Access.
 * @{
 */

/**
 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr             Instruction size.
 * @param   offNextInstr        The offset of the next instruction.
 * @param   enmEffOpSize        Effective operand size.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                        IEMMODE enmEffOpSize) RT_NOEXCEPT
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
            if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
                          || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
                pVCpu->cpum.GstCtx.rip = uNewIp;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }

        case IEMMODE_32BIT:
        {
            Assert(!IEM_IS_64BIT_CODE(pVCpu));
            Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);

            uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
            if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
                pVCpu->cpum.GstCtx.rip = uNewEip;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }

        case IEMMODE_64BIT:
        {
            Assert(IEM_IS_64BIT_CODE(pVCpu));

            uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
            if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
                pVCpu->cpum.GstCtx.rip = uNewRip;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = cbInstr;
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}
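
/* Editor's note: in the 16-bit case above the uint16_t addition wraps modulo
 * 64 KiB by design, matching 16-bit x86 jump semantics.  E.g. a 2-byte short
 * jump at ip = 0xfff0 with offNextInstr = 0x20 yields uNewIp = 0x0012 rather
 * than 0x10012, and that wrapped value is what gets checked against the CS
 * limit. */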


/**
 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr             Instruction size.
 * @param   offNextInstr        The offset of the next instruction.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
{
    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);

    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
                  || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
        pVCpu->cpum.GstCtx.rip = uNewIp;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}


/**
 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr             Instruction size.
 * @param   offNextInstr        The offset of the next instruction.
 * @param   enmEffOpSize        Effective operand size.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
                                                         IEMMODE enmEffOpSize) RT_NOEXCEPT
{
    if (enmEffOpSize == IEMMODE_32BIT)
    {
        Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));

        uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
        if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
            pVCpu->cpum.GstCtx.rip = uNewEip;
        else
            return iemRaiseGeneralProtectionFault0(pVCpu);
    }
    else
    {
        Assert(enmEffOpSize == IEMMODE_64BIT);

        uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
        if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
            pVCpu->cpum.GstCtx.rip = uNewRip;
        else
            return iemRaiseGeneralProtectionFault0(pVCpu);
    }

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}
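
/* Editor's note: IEM_IS_CANONICAL enforces the x86-64 canonical-address rule,
 * i.e. with 48-bit virtual addressing bits 63:47 must all equal bit 47.  So
 * 0x00007fffffffffff and 0xffff800000000000 are valid RIP values here, while
 * 0x0000800000000000 is non-canonical and takes the #GP(0) path above. */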

/** @} */