VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h @ 108296

Last change on this file since 108296 was 108290, checked in by vboxsync, 2 months ago

iprt/cdefs.h,VMM/IEM: Added RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_BEGIN/END to cdefs.h for working around a warning/error in IEMInline.h with recent gcc versions (13.3 and others). jiraref:VBP-1531
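As a point of reference, such begin/end pairs are typically built from compiler diagnostic pragmas. A minimal sketch of the idea follows; the warning name and the exact expansion are assumptions here, not the actual iprt/cdefs.h definitions:

    #if defined(__GNUC__)
    # define RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_BEGIN \
        _Pragma("GCC diagnostic push") \
        _Pragma("GCC diagnostic ignored \"-Wunused-function\"")
    # define RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_END \
        _Pragma("GCC diagnostic pop")
    #else
    # define RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_BEGIN
    # define RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_END
    #endif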

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.2 KB
/* $Id: IEMInline.h 108290 2025-02-19 13:07:27Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Inlined Functions, Common.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
#define VMM_INCLUDED_SRC_include_IEMInline_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/err.h>

/* Documentation and forward declarations for target specific inline functions: */

RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_BEGIN

/**
 * Calculates the IEM_F_XXX flags.
 *
 * @returns IEM_F_XXX combination matching the current CPU state.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT;

#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
/**
 * Invalidates the decoder state and asserts various stuff - strict builds only.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECLINLINE(void) iemInitExecTargetStrict(PVMCPUCC pVCpu) RT_NOEXCEPT;
#endif

RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_END

/**
 * Makes status code adjustments (pass up from I/O and access handler)
 * as well as maintaining statistics.
 *
 * @returns Strict VBox status code to pass up.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The status from executing an instruction.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    if (rcStrict != VINF_SUCCESS)
    {
        /* Deal with the cases that should be treated as VINF_SUCCESS first. */
        if (   rcStrict == VINF_IEM_YIELD_PENDING_FF
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX /** @todo r=bird: Why do we need TWO status codes here? */
            || rcStrict == VINF_VMX_VMEXIT
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
            || rcStrict == VINF_SVM_VMEXIT
#endif
           )
        {
            rcStrict = pVCpu->iem.s.rcPassUp;
            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
            { /* likely */ }
            else
                pVCpu->iem.s.cRetPassUpStatus++;
        }
        else if (RT_SUCCESS(rcStrict))
        {
            AssertMsg(   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
                      || rcStrict == VINF_IOM_R3_IOPORT_READ
                      || rcStrict == VINF_IOM_R3_IOPORT_WRITE
                      || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_READ
                      || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
                      || rcStrict == VINF_CPUM_R3_MSR_READ
                      || rcStrict == VINF_CPUM_R3_MSR_WRITE
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR
                      || rcStrict == VINF_EM_RAW_TO_R3
                      || rcStrict == VINF_EM_TRIPLE_FAULT
                      || rcStrict == VINF_EM_EMULATE_SPLIT_LOCK
                      || rcStrict == VINF_GIM_R3_HYPERCALL
                      /* raw-mode / virt handlers only: */
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
                      || rcStrict == VINF_SELM_SYNC_GDT
                      || rcStrict == VINF_CSAM_PENDING_ACTION
                      || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
                      /* nested hw.virt codes: */
                      || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
                      || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
                      , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
            int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
            if (rcPassUp == VINF_SUCCESS)
                pVCpu->iem.s.cRetInfStatuses++;
            else if (   rcPassUp < VINF_EM_FIRST
                     || rcPassUp > VINF_EM_LAST
                     || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
            {
                LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
                pVCpu->iem.s.cRetPassUpStatus++;
                rcStrict = rcPassUp;
            }
            else
            {
                LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
                pVCpu->iem.s.cRetInfStatuses++;
            }
        }
        else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
            pVCpu->iem.s.cRetAspectNotImplemented++;
        else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
            pVCpu->iem.s.cRetInstrNotImplemented++;
        else
            pVCpu->iem.s.cRetErrStatuses++;
    }
    else
    {
        rcStrict = pVCpu->iem.s.rcPassUp;
        if (rcStrict != VINF_SUCCESS)
            pVCpu->iem.s.cRetPassUpStatus++;
    }

    /* Just clear it here as well. */
    pVCpu->iem.s.rcPassUp = VINF_SUCCESS;

    return rcStrict;
}
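/*
 * Usage sketch (illustrative, not part of the original header): a hypothetical
 * caller funnels every instruction execution status through this helper so
 * pending pass-up statuses and the statistics counters are handled in one place.
 * @code
 *      VBOXSTRICTRC rcStrict = iemExecOneWorker(pVCpu);    // hypothetical worker
 *      rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
 *      return rcStrict;                                    // ready for the non-IEM caller
 * @endcode
 */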


/**
 * Sets the pass up status.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   rcPassUp    The pass up status.  Must be informational.
 *                      VINF_SUCCESS is not allowed.
 */
DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
{
    AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);

    int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
    if (rcOldPassUp == VINF_SUCCESS)
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    /* If both are EM scheduling codes, use EM priority rules. */
    else if (   rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
             && rcPassUp    >= VINF_EM_FIRST && rcPassUp    <= VINF_EM_LAST)
    {
        if (rcPassUp < rcOldPassUp)
        {
            LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
            pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
        }
        else
            LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    }
    /* Override EM scheduling with specific status code. */
    else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
    {
        LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    }
    /* Don't override specific status code, first come first served. */
    else
        LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    return VINF_SUCCESS;
}
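/*
 * Illustrative example (not from the original source): for non-EM-scheduling
 * codes the function is strictly first come, first served, so a second
 * specific status does not replace an already recorded one:
 * @code
 *      iemSetPassUpStatus(pVCpu, VINF_IOM_R3_IOPORT_WRITE); // slot empty: recorded
 *      iemSetPassUpStatus(pVCpu, VINF_CPUM_R3_MSR_READ);    // specific code present: logged, not recorded
 * @endcode
 * When both codes are EM scheduling codes, the numerically lower one (higher
 * EM priority) wins instead.
 */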


#ifndef IEM_WITH_OPAQUE_DECODER_STATE

# if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */

/**
 * Initializes the execution state.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   fExecOpts   Optional execution flags:
 *                          - IEM_F_BYPASS_HANDLERS
 *                          - IEM_F_X86_DISREGARD_LOCK
 *
 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
 *          side-effects in strict builds.
 */
DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));

    pVCpu->iem.s.rcPassUp        = VINF_SUCCESS;
    pVCpu->iem.s.fExec           = iemCalcExecFlags(pVCpu) | fExecOpts;
    pVCpu->iem.s.cActiveMappings = 0;
    pVCpu->iem.s.iNextMapping    = 0;

# ifdef VBOX_STRICT
    iemInitExecTargetStrict(pVCpu);
# endif
}
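/*
 * Usage sketch (illustrative): iemInitExec and the iemUninitExec family are
 * meant to bracket the actual interpretation; the worker name below is a
 * placeholder, not a function from this header.
 * @code
 *      iemInitExec(pVCpu, 0 /* no IEM_F_BYPASS_HANDLERS / IEM_F_X86_DISREGARD_LOCK */);
 *      VBOXSTRICTRC rcStrict = iemDoTheInterpreting(pVCpu);   // hypothetical
 *      return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
 * @endcode
 */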


# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
/**
 * Performs a minimal reinitialization of the execution state.
 *
 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
 * 'world-switch' type operations on the CPU.  Currently only nested
 * hardware-virtualization uses it.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length (for flushing).
 */
DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
{
    pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
    iemOpcodeFlushHeavy(pVCpu, cbInstr);
}
# endif
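/*
 * Illustrative call (assumption, not from the original source): a nested VMX
 * VM-exit path that has just completed a 3-byte VMLAUNCH instruction could
 * refresh the mode flags and flush the opcode buffer like this:
 * @code
 *      iemReInitExec(pVCpu, 3 /* cbInstr */);
 * @endcode
 */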

# endif /* VBOX_INCLUDED_vmm_dbgf_h || DOXYGEN_RUNNING */

/**
 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 */
DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
# ifdef VBOX_STRICT
#  ifdef IEM_WITH_CODE_TLB
    NOREF(pVCpu);
#  else
    pVCpu->iem.s.cbOpcode = 0;
#  endif
# else
    NOREF(pVCpu);
# endif
}


/**
 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
 *
 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
 *
 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The status code to fiddle.
 */
DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    iemUninitExec(pVCpu);
    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
}

#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */



/** @name   Memory access.
 *
 * @{
 */

/**
 * Maps a physical page.
 *
 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   GCPhysMem   The physical address.
 * @param   fAccess     The intended access.
 * @param   ppvMem      Where to return the mapping address.
 * @param   pLock       The PGM lock.
 */
DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
                              void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
{
#ifdef IEM_LOG_MEMORY_WRITES
    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        return VERR_PGM_PHYS_TLB_CATCH_ALL;
#endif

    /** @todo This API may require some improving later.  A private deal with PGM
     *        regarding locking and unlocking needs to be struck.  A couple of TLBs
     *        living in PGM, but with publicly accessible inlined access methods
     *        could perhaps be an even better solution. */
    int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
                                  GCPhysMem,
                                  RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
                                  RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
                                  ppvMem,
                                  pLock);
    /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
    AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));

    return rc;
}
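/*
 * Usage sketch (illustrative): map, access, unmap, releasing the PGM lock on
 * success; the destination buffer and size names are placeholders.
 * @code
 *      void          *pvMem;
 *      PGMPAGEMAPLOCK Lock;
 *      int rc = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_TYPE_READ, &pvMem, &Lock);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          memcpy(pvDst, pvMem, cbToRead);  // hypothetical guest page read
 *          iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_TYPE_READ, pvMem, &Lock);
 *      }
 * @endcode
 */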


/**
 * Unmap a page previously mapped by iemMemPageMap.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   GCPhysMem   The physical address.
 * @param   fAccess     The intended access.
 * @param   pvMem       What iemMemPageMap returned.
 * @param   pLock       The PGM lock.
 */
DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
                                 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
{
    NOREF(pVCpu);
    NOREF(GCPhysMem);
    NOREF(fAccess);
    NOREF(pvMem);
    PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
}


/*
 * Unmap helpers.
 */

DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
#endif
    iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo);
}


DECL_INLINE_THROW(void) iemMemCommitAndUnmapAtJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
#endif
    iemMemCommitAndUnmapAtSafeJmp(pVCpu, bMapInfo);
}


DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
#endif
    iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo);
}


DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
#endif
    iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo);
}

DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
#endif
    iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo);
}
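/*
 * Usage sketch (illustrative): the inline Jmp helpers are the fast path for a
 * preceding mapping call that filled in bMapInfo; with the ring-3 data TLB
 * active, bMapInfo is typically zero and no out-of-line worker is called.
 * The mapping helper named below is a placeholder.
 * @code
 *      uint8_t   bUnmapInfo;
 *      uint32_t *pu32Dst = iemMemMapDataU32WoJmp(pVCpu, &bUnmapInfo, GCPtrMem); // hypothetical
 *      *pu32Dst = u32Value;
 *      iemMemCommitAndUnmapWoJmp(pVCpu, bUnmapInfo);
 * @endcode
 */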

/** @} */


#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3)
/**
 * Adds an entry to the TLB trace buffer.
 *
 * @note    Don't use directly, only via the IEMTLBTRACE_XXX macros.
 */
DECLINLINE(void) iemTlbTrace(PVMCPU pVCpu, IEMTLBTRACETYPE enmType, uint64_t u64Param, uint64_t u64Param2 = 0,
                             uint8_t bParam = 0, uint32_t u32Param = 0/*, uint16_t u16Param = 0 */)
{
    uint32_t const          fMask  = RT_BIT_32(pVCpu->iem.s.cTlbTraceEntriesShift) - 1;
    PIEMTLBTRACEENTRY const pEntry = &pVCpu->iem.s.paTlbTraceEntries[pVCpu->iem.s.idxTlbTraceEntry++ & fMask];
    pEntry->u64Param  = u64Param;
    pEntry->u64Param2 = u64Param2;
    pEntry->u16Param  = 0; //u16Param;
    pEntry->u32Param  = u32Param;
    pEntry->bParam    = bParam;
    pEntry->enmType   = enmType;
    pEntry->rip       = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
}
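/*
 * The IEMTLBTRACE_XXX wrappers mentioned above are expected to compile away
 * when tracing is disabled; a minimal sketch of one such wrapper (the macro
 * and enum value names here are assumptions, the real definitions live
 * elsewhere and may differ):
 * @code
 *      #if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3)
 *      # define IEMTLBTRACE_FLUSH(a_pVCpu, a_uRev, a_fDataTlb) \
 *          iemTlbTrace(a_pVCpu, kIemTlbTraceType_Flush, a_uRev, a_fDataTlb)
 *      #else
 *      # define IEMTLBTRACE_FLUSH(a_pVCpu, a_uRev, a_fDataTlb) do { } while (0)
 *      #endif
 * @endcode
 */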
#endif

#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */