VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h

Last change on this file was 108409, checked in by vboxsync, 8 weeks ago

VMM/IEM: Made IEMAll.cpp build targeting arm. jiraref:VBP-1531

/* $Id: IEMInline.h 108409 2025-02-27 10:35:39Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Inlined Functions, Common.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
#define VMM_INCLUDED_SRC_include_IEMInline_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/err.h>


/* Documentation and forward declarations for inline functions required for every target: */

RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_BEGIN

RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_END

/**
 * Makes status code adjustments (pass up from I/O and access handler)
 * as well as maintaining statistics.
 *
 * @returns Strict VBox status code to pass up.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The status from executing an instruction.
 */
DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    if (rcStrict != VINF_SUCCESS)
    {
        /* Deal with the cases that should be treated as VINF_SUCCESS first. */
        if (   rcStrict == VINF_IEM_YIELD_PENDING_FF
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX /** @todo r=bird: Why do we need TWO status codes here? */
            || rcStrict == VINF_VMX_VMEXIT
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
            || rcStrict == VINF_SVM_VMEXIT
#endif
           )
        {
            rcStrict = pVCpu->iem.s.rcPassUp;
            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
            { /* likely */ }
            else
                pVCpu->iem.s.cRetPassUpStatus++;
        }
        else if (RT_SUCCESS(rcStrict))
        {
            AssertMsg(   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
                      || rcStrict == VINF_IOM_R3_IOPORT_READ
                      || rcStrict == VINF_IOM_R3_IOPORT_WRITE
                      || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_READ
                      || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_WRITE
                      || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
                      || rcStrict == VINF_CPUM_R3_MSR_READ
                      || rcStrict == VINF_CPUM_R3_MSR_WRITE
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR
                      || rcStrict == VINF_EM_RAW_TO_R3
                      || rcStrict == VINF_EM_TRIPLE_FAULT
                      || rcStrict == VINF_EM_EMULATE_SPLIT_LOCK
                      || rcStrict == VINF_GIM_R3_HYPERCALL
                      /* raw-mode / virt handlers only: */
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
                      || rcStrict == VINF_SELM_SYNC_GDT
                      || rcStrict == VINF_CSAM_PENDING_ACTION
                      || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
                      /* nested hw.virt codes: */
                      || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
                      || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
                      , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
            int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
            if (rcPassUp == VINF_SUCCESS)
                pVCpu->iem.s.cRetInfStatuses++;
            else if (   rcPassUp < VINF_EM_FIRST
                     || rcPassUp > VINF_EM_LAST
                     || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
            {
                LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
                pVCpu->iem.s.cRetPassUpStatus++;
                rcStrict = rcPassUp;
            }
            else
            {
                LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
                pVCpu->iem.s.cRetInfStatuses++;
            }
        }
        else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
            pVCpu->iem.s.cRetAspectNotImplemented++;
        else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
            pVCpu->iem.s.cRetInstrNotImplemented++;
        else
            pVCpu->iem.s.cRetErrStatuses++;
    }
    else
    {
        rcStrict = pVCpu->iem.s.rcPassUp;
        if (rcStrict != VINF_SUCCESS)
            pVCpu->iem.s.cRetPassUpStatus++;
    }

    /* Just clear it here as well. */
    pVCpu->iem.s.rcPassUp = VINF_SUCCESS;

    return rcStrict;
}
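
/*
 * A minimal usage sketch (hypothetical, not part of this file): an outer
 * execute-one-instruction worker would pass its raw status through
 * iemExecStatusCodeFiddling so that any recorded pass-up status and the
 * return-status statistics are folded in before returning to the caller.
 * iemExecuteInstructionSketch is an assumed stand-in for the real
 * decode-and-execute step.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC iemExecOneSketch(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = iemExecuteInstructionSketch(pVCpu); /* hypothetical decode+execute */
    return iemExecStatusCodeFiddling(pVCpu, rcStrict);          /* fold in rcPassUp + stats */
}
#endif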


/**
 * Sets the pass up status.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   rcPassUp    The pass up status.  Must be informational.
 *                      VINF_SUCCESS is not allowed.
 */
DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
{
    AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);

    int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
    if (rcOldPassUp == VINF_SUCCESS)
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    /* If both are EM scheduling codes, use EM priority rules (lower value wins). */
    else if (   rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
             && rcPassUp    >= VINF_EM_FIRST && rcPassUp    <= VINF_EM_LAST)
    {
        if (rcPassUp < rcOldPassUp)
        {
            LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
            pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
        }
        else
            LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    }
    /* Override EM scheduling with a specific status code. */
    else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
    {
        LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    }
    /* Don't override a specific status code; first come, first served. */
    else
        LogEx(LOG_GROUP_IEM, ("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    return VINF_SUCCESS;
}
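
/*
 * A minimal usage sketch (hypothetical, not part of this file): a helper that
 * hits an informational condition records it with iemSetPassUpStatus and still
 * returns VINF_SUCCESS so the current instruction can finish; the recorded
 * code is promoted later by iemExecStatusCodeFiddling above.
 */
#if 0 /* illustrative sketch only */
static int iemDeferIoPortCommitSketch(PVMCPUCC pVCpu)
{
    /* ... the emulated OUT must be completed in ring-3 ... */
    return iemSetPassUpStatus(pVCpu, VINF_IOM_R3_IOPORT_COMMIT_WRITE); /* always returns VINF_SUCCESS */
}
#endif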


/** @name Memory access.
 *
 * @{
 */

/**
 * Maps a physical page.
 *
 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   GCPhysMem   The physical address.
 * @param   fAccess     The intended access.
 * @param   ppvMem      Where to return the mapping address.
 * @param   pLock       The PGM lock.
 */
DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
                              void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
{
#ifdef IEM_LOG_MEMORY_WRITES
    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        return VERR_PGM_PHYS_TLB_CATCH_ALL;
#endif

    /** @todo This API may require some improving later.  A private deal with PGM
     *        regarding locking and unlocking needs to be struck.  A couple of TLBs
     *        living in PGM, but with publicly accessible inlined access methods
     *        could perhaps be an even better solution. */
    int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
                                  GCPhysMem,
                                  RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
                                  RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
                                  ppvMem,
                                  pLock);
    /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
    AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));

    return rc;
}


/**
 * Unmaps a page previously mapped by iemMemPageMap.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   GCPhysMem   The physical address.
 * @param   fAccess     The intended access.
 * @param   pvMem       What iemMemPageMap returned.
 * @param   pLock       The PGM lock.
 */
DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
                                 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
{
    NOREF(pVCpu);
    NOREF(GCPhysMem);
    NOREF(fAccess);
    NOREF(pvMem);
    PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
}
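
/*
 * A minimal usage sketch (hypothetical, not part of this file): iemMemPageMap
 * and iemMemPageUnmap are used as a pair, the unmap call releasing the PGM
 * mapping lock taken by the map call.  IEM_ACCESS_DATA_R is assumed to be the
 * usual data-read access flag combination from IEMInternal.h.
 */
#if 0 /* illustrative sketch only */
static int iemReadPhysByteSketch(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pbValue)
{
    void           *pvMem = NULL;
    PGMPAGEMAPLOCK  Lock;
    int rc = iemMemPageMap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, &pvMem, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pvMem;
        iemMemPageUnmap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, pvMem, &Lock);
    }
    return rc;
}
#endif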


/*
 * Unmap helpers.
 */

DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
#endif
    iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo);
}


DECL_INLINE_THROW(void) iemMemCommitAndUnmapAtJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
#endif
    iemMemCommitAndUnmapAtSafeJmp(pVCpu, bMapInfo);
}


DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
#endif
    iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo);
}


DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
#endif
    iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo);
}

DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    if (RT_LIKELY(bMapInfo == 0))
        return;
#endif
    iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo);
}

/** @} */
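
/*
 * A minimal usage sketch (hypothetical, not part of this file): the *Jmp
 * helpers above inline the common ring-3 data-TLB case (bMapInfo == 0, no
 * commit work needed) and defer everything else to the out-of-line *Safe*
 * workers.  A write-only mapping is committed on success and rolled back on
 * failure.
 */
#if 0 /* illustrative sketch only */
static void iemFinishStoreSketch(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t bMapInfo, uint32_t u32Value, bool fOk)
{
    /* pu32Dst/bMapInfo as returned by a hypothetical write-only mapping call. */
    if (fOk)
    {
        *pu32Dst = u32Value;
        iemMemCommitAndUnmapWoJmp(pVCpu, bMapInfo);  /* commit the write */
    }
    else
        iemMemRollbackAndUnmapWo(pVCpu, bMapInfo);   /* discard it */
}
#endif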


#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3)
/**
 * Adds an entry to the TLB trace buffer.
 *
 * @note Don't use directly, only via the IEMTLBTRACE_XXX macros.
 */
DECLINLINE(void) iemTlbTrace(PVMCPU pVCpu, IEMTLBTRACETYPE enmType, uint64_t u64Param, uint64_t u64Param2 = 0,
                             uint8_t bParam = 0, uint32_t u32Param = 0/*, uint16_t u16Param = 0 */)
{
    uint32_t const          fMask  = RT_BIT_32(pVCpu->iem.s.cTlbTraceEntriesShift) - 1;
    PIEMTLBTRACEENTRY const pEntry = &pVCpu->iem.s.paTlbTraceEntries[pVCpu->iem.s.idxTlbTraceEntry++ & fMask];
    pEntry->u64Param  = u64Param;
    pEntry->u64Param2 = u64Param2;
    pEntry->u16Param  = 0; //u16Param;
    pEntry->u32Param  = u32Param;
    pEntry->bParam    = bParam;
    pEntry->enmType   = enmType;
    pEntry->rip       = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
}
#endif
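
/*
 * A note on the trace-buffer indexing above: cTlbTraceEntriesShift gives a
 * power-of-two buffer size, so masking the post-incremented index with
 * RT_BIT_32(shift) - 1 wraps it without a division, keeping the most recent
 * 2^shift entries.  A standalone illustration of the wrap:
 */
#if 0 /* illustrative sketch only */
uint32_t const fMask = RT_BIT_32(8) - 1;  /* 256-entry buffer -> mask 0x0ff */
uint32_t       idx   = 255;
idx = (idx + 1) & fMask;                  /* 256 wraps around to 0 */
#endif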

#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */