/* $Id: IEMAllMem.cpp 108589 2025-03-18 10:08:56Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Common Memory Routines.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_MEM
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/dbgf.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#ifdef VBOX_VMM_TARGET_X86
# include "target-x86/IEMAllTlbInline-x86.h"
#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#if defined(IEM_LOG_MEMORY_WRITES)
/** What IEM just wrote. */
uint8_t g_abIemWrote[256];
/** How much IEM just wrote. */
size_t g_cbIemWrote;
#endif


/** @name Memory access.
 *
 * @{
 */

/**
 * Commits a bounce buffer that needs writing back and unmaps it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   iMemMap         The index of the buffer to commit.
 * @param   fPostponeFail   Whether we can postpone writer failures to ring-3.
 *                          Always false in ring-3, obviously.
 */
static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
{
    Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
#ifdef IN_RING3
    Assert(!fPostponeFail);
    RT_NOREF_PV(fPostponeFail);
#endif

    /*
     * Do the writing.
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    {
        uint16_t const cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
        uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
        uint8_t const *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
        if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
        {
            /*
             * Carefully and efficiently dealing with access handler return
             * codes makes this a little bloated.
             */
            VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
                                                 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                 pbBuf,
                                                 cbFirst,
                                                 PGMACCESSORIGIN_IEM);
            if (rcStrict == VINF_SUCCESS)
            {
                if (cbSecond)
                {
                    rcStrict = PGMPhysWrite(pVM,
                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                            pbBuf + cbFirst,
                                            cbSecond,
                                            PGMACCESSORIGIN_IEM);
                    if (rcStrict == VINF_SUCCESS)
                    { /* nothing */ }
                    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#ifndef IN_RING3
                    else if (fPostponeFail)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
                        VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                        return iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#endif
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        return rcStrict;
                    }
                }
            }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                if (!cbSecond)
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                }
                else
                {
                    VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
                                                          pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                          pbBuf + cbFirst,
                                                          cbSecond,
                                                          PGMACCESSORIGIN_IEM);
                    if (rcStrict2 == VINF_SUCCESS)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
                    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
                        PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#ifndef IN_RING3
                    else if (fPostponeFail)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
                        VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                        return iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#endif
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
                        return rcStrict2;
                    }
                }
            }
#ifndef IN_RING3
            else if (fPostponeFail)
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                if (!cbSecond)
                    pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
                else
                    pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
                VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                return iemSetPassUpStatus(pVCpu, rcStrict);
            }
#endif
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                return rcStrict;
            }
        }
        else
        {
            /*
             * No access handlers, much simpler.
             */
            int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
            if (RT_SUCCESS(rc))
            {
                if (cbSecond)
                {
                    rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
                    if (RT_SUCCESS(rc))
                    { /* likely */ }
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
                        return rc;
                    }
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                return rc;
            }
        }
    }

#if defined(IEM_LOG_MEMORY_WRITES)
    Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
          RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
        Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
              RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
              &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));

    size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    g_cbIemWrote = cbWrote;
    memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
#endif

    /*
     * Free the mapping entry.
     */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}
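
/*
 * Note on the postponed-write path (added commentary, not part of the original
 * sources): when fPostponeFail is true and one of the PGMPhysWrite calls above
 * fails in ring-0, the mapping entry is deliberately left allocated with
 * IEM_ACCESS_PENDING_R3_WRITE_1ST and/or IEM_ACCESS_PENDING_R3_WRITE_2ND set
 * and VMCPU_FF_IEM raised, so the current instruction can still retire.
 * Ring-3 later picks this up via IEMR3ProcessForceFlag (near the bottom of
 * this file), which replays the pending PGMPhysWrite calls from the bounce
 * buffer and frees the entries.
 */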


/**
 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
 * @todo duplicated
 */
DECL_FORCE_INLINE(uint32_t)
iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
{
    bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
}


/**
 * iemMemMap worker that deals with a request crossing pages.
 */
VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
                                            size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
    Assert(cbMem <= GUEST_PAGE_SIZE);

    /*
     * Do the address translations.
     */
    uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    RTGCPHYS GCPhysFirst;
    VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));

    uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
    RTGCPHYS GCPhysSecond;
    rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
                                                 cbSecondPage, fAccess, &GCPhysSecond);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
    GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * Check for data breakpoints.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
    { /* likely */ }
    else
    {
        uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
        fDataBps         |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
                                                      cbSecondPage, fAccess);
#ifdef VBOX_VMM_TARGET_X86
        pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
        if (fDataBps > 1)
            LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
                                  fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
#elif defined(VBOX_VMM_TARGET_ARMV8)
        AssertFailed(); RT_NOREF(fDataBps); /** @todo ARMv8/IEM: implement data breakpoints. */
#else
# error "port me"
#endif
    }

    /*
     * Read in the current memory content if it's a read, execute or partial
     * write access.
     */
    uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

    if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    {
        if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
        {
            /*
             * Must carefully deal with access handler status codes here,
             * makes the code a bit bloated.
             */
            rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
            if (rcStrict == VINF_SUCCESS)
            {
                rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
                if (rcStrict == VINF_SUCCESS)
                { /* likely */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
                                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict;
                }
            }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
                if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                {
                    PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                }
                else
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict2;
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
                                      GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                return rcStrict;
            }
        }
        else
        {
            /*
             * No informational status codes here, much more straightforward.
             */
            int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
            if (RT_SUCCESS(rc))
            {
                Assert(rc == VINF_SUCCESS);
                rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
                if (RT_SUCCESS(rc))
                    Assert(rc == VINF_SUCCESS);
                else
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
                    return rc;
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
                return rc;
            }
        }
    }
#ifdef VBOX_STRICT
    else
        memset(pbBuf, 0xcc, cbMem);
    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
#endif
    AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);

    /*
     * Commit the bounce buffer entry.
     */
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst  = GCPhysFirst;
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst      = (uint16_t)cbFirstPage;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond     = (uint16_t)cbSecondPage;
    pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned  = false;
    pVCpu->iem.s.aMemMappings[iMemMap].pv             = pbBuf;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess        = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

    *ppvMem      = pbBuf;
    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return VINF_SUCCESS;
}
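
/*
 * Note on the unmap info byte (added commentary, not part of the original
 * sources): the value stored into *pbUnmapInfo above packs three fields and
 * matches the decoding done by iemMemCommitAndUnmap and friends below:
 *
 *      bits 0..2 - the mapping index (iMemMap);
 *      bit  3    - always set, marking the byte as referring to a live mapping;
 *      bits 4..7 - the IEM_ACCESS_TYPE_MASK portion of fAccess.
 *
 * A minimal decoding sketch, using only what this file itself does:
 *
 *      uintptr_t const iMemMap = bUnmapInfo & 0x7;
 *      bool const      fLive   = RT_BOOL(bUnmapInfo & 0x08);
 *      unsigned const  fType   = (unsigned)bUnmapInfo >> 4;   // IEM_ACCESS_TYPE_* bits
 */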


/**
 * iemMemMap worker that deals with iemMemPageMap failures.
 */
VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
                                       RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);

    /*
     * Filter out conditions we can handle and the ones which shouldn't happen.
     */
    if (   rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
        && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
        && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
    {
        AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
        return rcMap;
    }
    pVCpu->iem.s.cPotentialExits++;

    /*
     * Read in the current memory content if it's a read, execute or partial
     * write access.
     */
    uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    {
        if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
            memset(pbBuf, 0xff, cbMem);
        else
        {
            int rc;
            if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
            {
                VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
                if (rcStrict == VINF_SUCCESS)
                { /* nothing */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
                                          GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict;
                }
            }
            else
            {
                rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
                if (RT_SUCCESS(rc))
                { /* likely */ }
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
                                          GCPhysFirst, rc));
                    return rc;
                }
            }
        }
    }
#ifdef VBOX_STRICT
    else
        memset(pbBuf, 0xcc, cbMem);
#endif
#ifdef VBOX_STRICT
    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
#endif

    /*
     * Commit the bounce buffer entry.
     */
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst  = GCPhysFirst;
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst      = (uint16_t)cbMem;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond     = 0;
    pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned  = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
    pVCpu->iem.s.aMemMappings[iMemMap].pv             = pbBuf;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess        = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

    *ppvMem      = pbBuf;
    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return VINF_SUCCESS;
}
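
/*
 * Note (added commentary, not part of the original sources): for the
 * VERR_PGM_PHYS_TLB_UNASSIGNED case above the bounce buffer is filled with
 * 0xff so reads behave like accesses to unassigned physical memory, and
 * fUnassigned is set so that iemMemBounceBufferCommitAndUnmap skips the
 * write-back entirely, i.e. writes to unassigned memory are dropped.
 */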



/**
 * Commits the guest memory if bounce buffered and unmaps it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturn(   (bUnmapInfo & 0x08)
                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                    && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
                    VERR_NOT_FOUND);

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
            return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}


/**
 * Rolls back the guest memory (conceptually only) and unmaps it.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                        && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                           == ((unsigned)bUnmapInfo >> 4),
                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));

    /* Unlock it if necessary. */
    if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
}


/**
 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                        && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                           == ((unsigned)bUnmapInfo >> 4),
                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
        {
            VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
            if (rcStrict == VINF_SUCCESS)
                return;
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
}


/** Fallback for iemMemCommitAndUnmapRwJmp. */
void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapAtJmp. */
void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapWoJmp. */
void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapRoJmp. */
void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemRollbackAndUnmapWo. */
void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
}


#ifndef IN_RING3
/**
 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
 *
 * Allows the instruction to be completed and retired, while the IEM user will
 * return to ring-3 immediately afterwards and do the postponed writes there.
 *
 * @returns VBox status code (no strict statuses). Caller must check
 *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturn(   (bUnmapInfo & 0x08)
                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                    && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                       == ((unsigned)bUnmapInfo >> 4),
                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
                    VERR_NOT_FOUND);

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
            return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}
#endif


/**
 * Rolls back mappings, releasing page locks and such.
 *
 * The caller shall only call this after checking cActiveMappings.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(pVCpu->iem.s.cActiveMappings > 0);

    uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
    {
        uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
        if (fAccess != IEM_ACCESS_INVALID)
        {
            AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
            if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
                PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
            AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
                      ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
                       iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
            pVCpu->iem.s.cActiveMappings--;
        }
    }
}

#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_IEM

/** @} */


#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPUCC pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
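
/*
 * Note (added commentary, not part of the original sources): when both inputs
 * are EM scheduling statuses in the VINF_EM_FIRST..VINF_EM_LAST range, the
 * numerically smaller one is returned, which by EM convention is treated as
 * the higher-priority scheduling request; any other combination falls through
 * to iemR3MergeStatusSlow above.
 */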


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}

#endif /* IN_RING3 */