/* $Id: IEMInline.h 108260 2025-02-17 15:24:14Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Inlined Functions, Common.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
#define VMM_INCLUDED_SRC_include_IEMInline_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/err.h>

/* Documentation and forward declarations for target specific inline functions: */

/**
 * Calculates the IEM_F_XXX flags.
 *
 * @returns IEM_F_XXX combination matching the current CPU state.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT;

#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
/**
 * Invalidates the decoder state and asserts various stuff - strict builds only.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECLINLINE(void) iemInitExecTargetStrict(PVMCPUCC pVCpu) RT_NOEXCEPT;
#endif



/**
 * Makes status code adjustments (pass up from I/O and access handler)
 * as well as maintaining statistics.
 *
 * @returns Strict VBox status code to pass up.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The status from executing an instruction.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    if (rcStrict != VINF_SUCCESS)
    {
        /* Deal with the cases that should be treated as VINF_SUCCESS first. */
        if (   rcStrict == VINF_IEM_YIELD_PENDING_FF
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX /** @todo r=bird: Why do we need TWO status codes here? */
            || rcStrict == VINF_VMX_VMEXIT
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
            || rcStrict == VINF_SVM_VMEXIT
#endif
           )
        {
            rcStrict = pVCpu->iem.s.rcPassUp;
            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
            { /* likely */ }
            else
                pVCpu->iem.s.cRetPassUpStatus++;
        }
        else if (RT_SUCCESS(rcStrict))
        {
            AssertMsg(   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
                      || rcStrict == VINF_IOM_R3_IOPORT_READ
                      || rcStrict == VINF_IOM_R3_IOPORT_WRITE
                      || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_READ
                      || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
                      || rcStrict == VINF_CPUM_R3_MSR_READ
                      || rcStrict == VINF_CPUM_R3_MSR_WRITE
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR
                      || rcStrict == VINF_EM_RAW_TO_R3
                      || rcStrict == VINF_EM_TRIPLE_FAULT
                      || rcStrict == VINF_EM_EMULATE_SPLIT_LOCK
                      || rcStrict == VINF_GIM_R3_HYPERCALL
                      /* raw-mode / virt handlers only: */
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
                      || rcStrict == VINF_SELM_SYNC_GDT
                      || rcStrict == VINF_CSAM_PENDING_ACTION
                      || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
                      /* nested hw.virt codes: */
                      || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
                      || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
                      , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
            int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
            if (rcPassUp == VINF_SUCCESS)
                pVCpu->iem.s.cRetInfStatuses++;
            else if (   rcPassUp < VINF_EM_FIRST
                     || rcPassUp > VINF_EM_LAST
                     || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
            {
                LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
                pVCpu->iem.s.cRetPassUpStatus++;
                rcStrict = rcPassUp;
            }
            else
            {
                LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
                pVCpu->iem.s.cRetInfStatuses++;
            }
        }
        else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
            pVCpu->iem.s.cRetAspectNotImplemented++;
        else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
            pVCpu->iem.s.cRetInstrNotImplemented++;
        else
            pVCpu->iem.s.cRetErrStatuses++;
    }
    else
    {
        rcStrict = pVCpu->iem.s.rcPassUp;
        if (rcStrict != VINF_SUCCESS)
            pVCpu->iem.s.cRetPassUpStatus++;
    }

    /* Just clear it here as well. */
    pVCpu->iem.s.rcPassUp = VINF_SUCCESS;

    return rcStrict;
}
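
/* Caller-pattern sketch (added illustration, not part of the original file):
   the top-level IEM entry points are expected to run every instruction
   status through this fiddler so that statuses recorded with
   iemSetPassUpStatus() are merged in before returning to EM:

       VBOXSTRICTRC rcStrict = ...;    // execute one instruction (elided)
       return iemExecStatusCodeFiddling(pVCpu, rcStrict);
 */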


/**
 * Sets the pass up status.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   rcPassUp    The pass up status.  Must be informational.
 *                      VINF_SUCCESS is not allowed.
 */
DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
{
    AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);

    int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
    if (rcOldPassUp == VINF_SUCCESS)
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    /* If both are EM scheduling codes, use EM priority rules. */
    else if (   rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
             && rcPassUp    >= VINF_EM_FIRST && rcPassUp    <= VINF_EM_LAST)
    {
        if (rcPassUp < rcOldPassUp)
        {
            LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
            pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
        }
        else
            LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    }
    /* Override EM scheduling with specific status code. */
    else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
    {
        LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    }
    /* Don't override specific status code, first come first served. */
    else
        LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    return VINF_SUCCESS;
}
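
/* Usage sketch (added illustration; the surrounding context is assumed):
   a helper that picked up an informational status it cannot return
   directly - e.g. VINF_IOM_R3_IOPORT_WRITE from a port access - records
   it and reports success so emulation of the instruction can complete;
   iemExecStatusCodeFiddling() later folds the recorded status back into
   the value handed to the caller of IEM:

       if (rcStrict != VINF_SUCCESS)
           rc = iemSetPassUpStatus(pVCpu, rcStrict);    // always returns VINF_SUCCESS
 */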


#ifndef IEM_WITH_OPAQUE_DECODER_STATE

# if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */

/**
 * Initializes the execution state.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   fExecOpts   Optional execution flags:
 *                          - IEM_F_BYPASS_HANDLERS
 *                          - IEM_F_X86_DISREGARD_LOCK
 *
 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
 *          side-effects in strict builds.
 */
DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));

    pVCpu->iem.s.rcPassUp        = VINF_SUCCESS;
    pVCpu->iem.s.fExec           = iemCalcExecFlags(pVCpu) | fExecOpts;
    pVCpu->iem.s.cActiveMappings = 0;
    pVCpu->iem.s.iNextMapping    = 0;

#  ifdef VBOX_STRICT
    iemInitExecTargetStrict(pVCpu);
#  endif
}
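
/* Typical pairing (added sketch; the middle step is hypothetical): each
   iemInitExec() call must be undone with iemUninitExec() or one of its
   wrappers, such as iemUninitExecAndFiddleStatusAndMaybeReenter() below:

       iemInitExec(pVCpu, 0);                       // fExecOpts: none
       VBOXSTRICTRC rcStrict = ...;                 // decode and execute (elided)
       return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
 */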


#  if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
/**
 * Performs a minimal reinitialization of the execution state.
 *
 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
 * 'world-switch' type operations on the CPU.  Currently only nested
 * hardware-virtualization uses it.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length (for flushing).
 */
DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
{
    pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
    iemOpcodeFlushHeavy(pVCpu, cbInstr);
}
#  endif

# endif /* VBOX_INCLUDED_vmm_dbgf_h || DOXYGEN_RUNNING */

/**
 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
# ifdef VBOX_STRICT
#  ifdef IEM_WITH_CODE_TLB
    NOREF(pVCpu);
#  else
    pVCpu->iem.s.cbOpcode = 0;
#  endif
# else
    NOREF(pVCpu);
# endif
}


/**
 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
 *
 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
 *
 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The status code to fiddle.
 */
DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    iemUninitExec(pVCpu);
    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
}

#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */



/** @name   Memory access.
 *
 * @{
 */

/**
 * Maps a physical page.
 *
 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   GCPhysMem   The physical address.
 * @param   fAccess     The intended access.
 * @param   ppvMem      Where to return the mapping address.
 * @param   pLock       The PGM lock.
 */
DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
                              void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
{
#ifdef IEM_LOG_MEMORY_WRITES
    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        return VERR_PGM_PHYS_TLB_CATCH_ALL;
#endif

    /** @todo This API may require some improvement later.  A private deal with
     *        PGM regarding locking and unlocking needs to be struck.  A couple of
     *        TLBs living in PGM, but with publicly accessible inlined access
     *        methods could perhaps be an even better solution. */
    int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
                                  GCPhysMem,
                                  RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
                                  RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
                                  ppvMem,
                                  pLock);
    /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
    AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));

    return rc;
}


/**
 * Unmap a page previously mapped by iemMemPageMap.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   GCPhysMem   The physical address.
 * @param   fAccess     The intended access.
 * @param   pvMem       What iemMemPageMap returned.
 * @param   pLock       The PGM lock.
 */
DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
                                 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
{
    NOREF(pVCpu);
    NOREF(GCPhysMem);
    NOREF(fAccess);
    NOREF(pvMem);
    PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
}
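
/* Usage sketch (added illustration; IEM_ACCESS_DATA_R is an assumed access
   flag from the IEM headers): map a guest-physical page, access the memory,
   then release the PGM lock again:

       PGMPAGEMAPLOCK Lock;
       void          *pvMem = NULL;
       int rc = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, &pvMem, &Lock);
       if (rc == VINF_SUCCESS)
       {
           // ... read from pvMem ...
           iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, pvMem, &Lock);
       }
 */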


/*
 * Unmap helpers.
 */

#ifdef IEM_WITH_SETJMP

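/* Note (added commentary): with the ring-3 data TLB a bMapInfo of zero
   presumably means the access was served directly by the TLB, so there is
   nothing to commit or unlock and the inline helpers below can skip the
   out-of-line 'Safe' workers on the likely path. */
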
DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
# endif
    iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo);
}


DECL_INLINE_THROW(void) iemMemCommitAndUnmapAtJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
# endif
    iemMemCommitAndUnmapAtSafeJmp(pVCpu, bMapInfo);
}


DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
# endif
    iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo);
}


DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
# endif
    iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo);
}

DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
# endif
    iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo);
}

#endif /* IEM_WITH_SETJMP */


/** @} */


#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3)
/**
 * Adds an entry to the TLB trace buffer.
 *
 * @note    Don't use directly, only via the IEMTLBTRACE_XXX macros.
 */
DECLINLINE(void) iemTlbTrace(PVMCPU pVCpu, IEMTLBTRACETYPE enmType, uint64_t u64Param, uint64_t u64Param2 = 0,
                             uint8_t bParam = 0, uint32_t u32Param = 0/*, uint16_t u16Param = 0 */)
{
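    /* Added note: the trace buffer is a power-of-two sized ring; masking the
       post-incremented index with RT_BIT_32(cTlbTraceEntriesShift) - 1 wraps
       it around. */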
    uint32_t const          fMask  = RT_BIT_32(pVCpu->iem.s.cTlbTraceEntriesShift) - 1;
    PIEMTLBTRACEENTRY const pEntry = &pVCpu->iem.s.paTlbTraceEntries[pVCpu->iem.s.idxTlbTraceEntry++ & fMask];
    pEntry->u64Param  = u64Param;
    pEntry->u64Param2 = u64Param2;
    pEntry->u16Param  = 0; //u16Param;
    pEntry->u32Param  = u32Param;
    pEntry->bParam    = bParam;
    pEntry->enmType   = enmType;
    pEntry->rip       = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
}
#endif

#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */