/* $Id: IEMAllMem-armv8.cpp 108791 2025-03-28 21:58:31Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - ARMV8 target, memory.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_MEM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/dbgf.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/armv8.h>

#include "IEMInline.h"
#include "IEMInline-armv8.h"
/// @todo #include "IEMInlineMem-armv8.h"
#include "IEMAllTlbInline-armv8.h"


/** @name Memory access.
 *
 * @{
 */

/**
 * Converts IEM_ACCESS_XXX + fExec to PGMQPAGE_F_XXX.
 */
DECL_FORCE_INLINE(uint32_t) iemMemArmAccessToQPage(PVMCPUCC pVCpu, uint32_t fAccess)
{
    AssertCompile(IEM_ACCESS_TYPE_READ  == PGMQPAGE_F_READ);
    AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
    AssertCompile(IEM_ACCESS_TYPE_EXEC  == PGMQPAGE_F_EXECUTE);
    /** @todo IEMTLBE_F_EFF_U_NO_GCS / IEMTLBE_F_EFF_P_NO_GCS,
     *        IEMTLBE_F_S1_NS/NSE, IEMTLBE_F_S2_NO_LIM_WRITE/TL0/TL1. */
    return (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
         | (!(fAccess & IEM_ACCESS_WHAT_SYS) && IEM_F_MODE_ARM_GET_EL(pVCpu->iem.s.fExec) == 0 ? PGMQPAGE_F_USER_MODE : 0);
}
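
/* Example (illustrative): an EL0 data read outside IEM_ACCESS_WHAT_SYS is converted to
   PGMQPAGE_F_READ | PGMQPAGE_F_USER_MODE by the helper above, while the same access at
   EL1 yields just PGMQPAGE_F_READ. */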



/**
 * Translates a virtual address to a physical address and checks if we
 * can access the page as specified.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   GCPtrMem    The virtual address.
 * @param   cbAccess    The access size, for raising \#PF correctly for
 *                      FXSAVE and such.
 * @param   fAccess     The intended access.
 * @param   pGCPhysMem  Where to return the physical address.
 */
VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
                                               uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
{
    Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
    PGMPTWALKFAST WalkFast;
    int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, iemMemArmAccessToQPage(pVCpu, fAccess), &WalkFast);
    if (RT_SUCCESS(rc))
    {
        Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);

        /* If the page is writable and does not have the no-exec bit set, all
           access is allowed.  Otherwise we'll have to check more carefully... */
#if 0 /** @todo rewrite to arm */
        Assert(   (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
               || (   (   !(fAccess & IEM_ACCESS_TYPE_WRITE)
                       || (WalkFast.fEffective & X86_PTE_RW)
                       || (   (   IEM_GET_CPL(pVCpu) != 3
                               || (fAccess & IEM_ACCESS_WHAT_SYS))
                           && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
                   && (   (WalkFast.fEffective & X86_PTE_US)
                       || IEM_GET_CPL(pVCpu) != 3
                       || (fAccess & IEM_ACCESS_WHAT_SYS) )
                   && (   !(fAccess & IEM_ACCESS_TYPE_EXEC)
                       || !(WalkFast.fEffective & X86_PTE_PAE_NX)
                       || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
                  )
              );

        /* PGMGstQueryPageFast sets the A & D bits. */
        /** @todo testcase: check when A and D bits are actually set by the CPU. */
        Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
#endif

        *pGCPhysMem = WalkFast.GCPhys;
        return VINF_SUCCESS;
    }

    LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
    /** @todo Check unassigned memory in unpaged mode. */
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
        IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
#endif
    *pGCPhysMem = NIL_RTGCPHYS;
    return iemRaiseDataAbortFromWalk(pVCpu, GCPtrMem, cbAccess, fAccess, rc, &WalkFast);
}
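
/*
 * Illustrative only: callers typically pair this with iemMemPageMap(), e.g. (sketch,
 * with example fAccess/size values):
 *
 *      RTGCPHYS     GCPhysMem = NIL_RTGCPHYS;
 *      VBOXSTRICTRC rcStrict  = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, sizeof(uint32_t),
 *                                                                 IEM_ACCESS_DATA_R, &GCPhysMem);
 *
 * which is what the !IEM_WITH_DATA_TLB path of iemMemMap() below does before mapping
 * the page.
 */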


/**
 * Finds a free memmap entry when using iNextMapping doesn't work.
 *
 * @returns Memory mapping index, 1024 on failure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
{
    /*
     * The typical case.
     */
    if (pVCpu->iem.s.cActiveMappings == 0)
    {
        pVCpu->iem.s.iNextMapping = 1;
        return 0;
    }

    /* There should be enough mappings for all instructions. */
    AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);

    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
        if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
            return i;

    AssertFailedReturn(1024);
}


#ifdef IEM_WITH_DATA_TLB
/**
 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
 * @todo duplicated
 */
DECL_FORCE_INLINE(uint32_t)
iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
{
    bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
}
#endif


/**
 * Converts PGM_PTATTRS_XXX to IEMTLBE_F_XXX.
 */
DECL_FORCE_INLINE(uint64_t) iemMemArmPtAttrsToTlbeFlags(uint64_t fPtAttrs)
{
    /** @todo stage 2 stuff, IEMTLBE_F_EFF_AMEC, PGM_PTATTRS_NT_SHIFT,
     *        PGM_PTATTRS_GP_SHIFT */
    AssertCompile(   PGM_PTATTRS_PR_SHIFT + 1 == PGM_PTATTRS_PW_SHIFT
                  && PGM_PTATTRS_PR_SHIFT + 2 == PGM_PTATTRS_PX_SHIFT
                  && PGM_PTATTRS_PR_SHIFT + 3 == PGM_PTATTRS_PGCS_SHIFT
                  && PGM_PTATTRS_PR_SHIFT + 4 == PGM_PTATTRS_UR_SHIFT
                  && PGM_PTATTRS_PR_SHIFT + 5 == PGM_PTATTRS_UW_SHIFT
                  && PGM_PTATTRS_PR_SHIFT + 6 == PGM_PTATTRS_UX_SHIFT
                  && PGM_PTATTRS_PR_SHIFT + 7 == PGM_PTATTRS_UGCS_SHIFT);
    AssertCompile(   IEMTLBE_F_EFF_P_NO_READ_BIT + 1 == IEMTLBE_F_EFF_P_NO_WRITE_BIT
                  && IEMTLBE_F_EFF_P_NO_READ_BIT + 2 == IEMTLBE_F_EFF_P_NO_EXEC_BIT
                  && IEMTLBE_F_EFF_P_NO_READ_BIT + 3 == IEMTLBE_F_EFF_P_NO_GCS_BIT
                  && IEMTLBE_F_EFF_P_NO_READ_BIT + 4 == IEMTLBE_F_EFF_U_NO_READ_BIT
                  && IEMTLBE_F_EFF_P_NO_READ_BIT + 5 == IEMTLBE_F_EFF_U_NO_WRITE_BIT
                  && IEMTLBE_F_EFF_P_NO_READ_BIT + 6 == IEMTLBE_F_EFF_U_NO_EXEC_BIT
                  && IEMTLBE_F_EFF_P_NO_READ_BIT + 7 == IEMTLBE_F_EFF_U_NO_GCS_BIT);
    AssertCompile(IEMTLBE_F_EFF_P_NO_WRITE_BIT < PGM_PTATTRS_PR_SHIFT);
    uint64_t const fInv  = fPtAttrs;
    uint64_t       fTlbe = (fInv >> (PGM_PTATTRS_PR_SHIFT - IEMTLBE_F_EFF_P_NO_WRITE_BIT))
                         & (  IEMTLBE_F_EFF_P_NO_READ
                            | IEMTLBE_F_EFF_P_NO_WRITE
                            | IEMTLBE_F_EFF_P_NO_EXEC
                            | IEMTLBE_F_EFF_P_NO_GCS
                            | IEMTLBE_F_EFF_U_NO_READ
                            | IEMTLBE_F_EFF_U_NO_WRITE
                            | IEMTLBE_F_EFF_U_NO_EXEC
                            | IEMTLBE_F_EFF_U_NO_GCS);

    AssertCompile(IEMTLBE_F_EFF_NO_DIRTY_BIT > PGM_PTATTRS_ND_SHIFT);
    fTlbe |= (fPtAttrs << (IEMTLBE_F_EFF_NO_DIRTY_BIT - PGM_PTATTRS_ND_SHIFT)) & IEMTLBE_F_EFF_NO_DIRTY;

    AssertCompile(PGM_PTATTRS_NS_SHIFT + 1 == PGM_PTATTRS_NSE_SHIFT);
    AssertCompile(IEMTLBE_F_S1_NS_BIT + 1 == IEMTLBE_F_S1_NSE_BIT);
    AssertCompile(IEMTLBE_F_S1_NS_BIT > PGM_PTATTRS_NS_SHIFT);
    fTlbe |= (fPtAttrs << (IEMTLBE_F_S1_NS_BIT - PGM_PTATTRS_NS_SHIFT)) & (IEMTLBE_F_S1_NS | IEMTLBE_F_S1_NSE);

    if (fPtAttrs & (PGM_PTATTRS_DEVICE_MASK | PGM_PTATTRS_S2_DEVICE_MASK))
        fTlbe |= IEMTLBE_F_EFF_DEVICE;

    return fTlbe;
}


/**
 * Maps the specified guest memory for the given kind of access.
 *
 * This may be using bounce buffering of the memory if it's crossing a page
 * boundary or if there is an access handler installed for any of it.  Because
 * of lock prefix guarantees, we're in for some extra clutter when this
 * happens.
 *
 * This may raise a \#GP, \#SS, \#PF or \#AC.
 *
 * @returns VBox strict status code.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   ppvMem      Where to return the pointer to the mapped memory.
 * @param   pbUnmapInfo Where to return unmap info to be passed to
 *                      iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
 *                      done.
 * @param   cbMem       The number of bytes to map.  This is usually 1, 2, 4, 6,
 *                      8, 12, 16, 32 or 512.  When used by string operations
 *                      it can be up to a page.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   fAccess     How the memory is being accessed.  The
 *                      IEM_ACCESS_TYPE_XXX part is used to figure out how to
 *                      map the memory, while the IEM_ACCESS_WHAT_XXX part is
 *                      used when raising exceptions.  The IEM_ACCESS_ATOMIC and
 *                      IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
 *                      set.
 * @param   uAlignCtl   Alignment control:
 *                          - Bits 15:0 is the alignment mask.
 *                          - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
 *                            IEM_MEMMAP_F_ALIGN_SSE, and
 *                            IEM_MEMMAP_F_ALIGN_GP_OR_AC.
 *                      Pass zero to skip alignment.
 */
VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, RTGCPTR GCPtrMem,
                       uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);

    /*
     * Check the input and figure out which mapping entry to use.
     */
    Assert(cbMem <= 64);
    Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
    Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));

    unsigned iMemMap = pVCpu->iem.s.iNextMapping;
    if (   iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
        || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
    {
        iMemMap = iemMemMapFindFree(pVCpu);
        AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
                              ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
                               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
                               pVCpu->iem.s.aMemMappings[2].fAccess),
                              VERR_IEM_IPE_9);
    }

    /*
     * Map the memory, checking that we can actually access it.  If something
     * slightly complicated happens, fall back on bounce buffering.
     */
    if ((GCPtrMem & GUEST_MIN_PAGE_OFFSET_MASK) + cbMem <= GUEST_MIN_PAGE_SIZE) /* Crossing a possible page/tlb boundary? */
    { /* likely */ }
    else if (  (GCPtrMem & IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(pVCpu->iem.s.fExec)) + cbMem
             > IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(pVCpu->iem.s.fExec))
        return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);

    /*
     * Alignment check.
     */
    if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
    { /* likely */ }
    else
    {
#if 0 /** @todo ARM: Implement alignment checks as we implement instructions... */
        /* Misaligned access. */
        if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
        {
            if (   !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
                || (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
                    && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
            {
                AssertCompile(X86_CR0_AM == X86_EFL_AC);

                if (!iemMemAreAlignmentChecksEnabled(pVCpu))
                { /* likely */ }
                else
                    return iemRaiseAlignmentCheckException(pVCpu);
            }
            else if (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
                     && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
                     /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
                      * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++.  Using 4 for now as
                      * that's what FXSAVE does on a 10980xe. */
                     && iemMemAreAlignmentChecksEnabled(pVCpu))
                return iemRaiseAlignmentCheckException(pVCpu);
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
        }
#endif

#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
        /* If the access is atomic there are host platform alignment restrictions
           we need to conform with. */
        if (   !(fAccess & IEM_ACCESS_ATOMIC)
# if defined(RT_ARCH_AMD64)
            || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
# elif defined(RT_ARCH_ARM64)
            || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
# else
#  error port me
# endif
           )
        { /* okay */ }
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
            pVCpu->iem.s.cMisalignedAtomics += 1;
            return VINF_EM_EMULATE_SPLIT_LOCK;
        }
#endif
    }

#ifdef IEM_WITH_DATA_TLB
    Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));

    /*
     * Get the TLB entry for this page and check PT flags.
     *
     * We reload the TLB entry if we need to set the dirty bit (accessed
     * should in theory always be set).
     */
    uint8_t       *pbMem       = NULL;
    uint64_t const uTagNoRev   = IEMTLB_CALC_TAG_NO_REV(pVCpu, GCPtrMem);
    PIEMTLBENTRY   pTlbe       = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
    bool const     fPrivileged = IEM_F_MODE_ARM_GET_EL(pVCpu->iem.s.fExec) > 0 || (fAccess & IEM_ACCESS_WHAT_SYS);
    uint64_t const fTlbeAcc    = fPrivileged
                               ?   (fAccess & IEM_ACCESS_TYPE_READ  ? IEMTLBE_F_EFF_P_NO_READ : 0)
                                 | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_EFF_P_NO_WRITE | IEMTLBE_F_EFF_NO_DIRTY : 0)
                               :   (fAccess & IEM_ACCESS_TYPE_READ  ? IEMTLBE_F_EFF_U_NO_READ : 0)
                                 | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_EFF_U_NO_WRITE | IEMTLBE_F_EFF_NO_DIRTY : 0);
    /** @todo IEMTLBE_F_EFF_U_NO_GCS / IEMTLBE_F_EFF_P_NO_GCS,
     *        IEMTLBE_F_S1_NS/NSE, IEMTLBE_F_S2_NO_LIM_WRITE/TL0/TL1. */
    /** @todo Make sure the TLB is flushed when changing the page size or
     *        somehow deal with that as well here? */
    /** @todo If the access is incompatible, we currently trigger a PT walk,
     *        which isn't necessarily correct... */
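    /* The even TLB entry of the pair holds non-global (ASID-tagged) translations and the
       odd entry (pTlbe + 1) global ones; the lookup below matches the corresponding TLB
       revision together with the ASID/VMID bits carried in uTlbPhysRev. */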
    if (   (   pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
            &&    (pTlbe->fFlagsAndPhysRev & (fTlbeAcc | IEMTLBE_F_S1_ASID | IEMTLBE_F_S2_VMID))
               == (pVCpu->iem.s.DataTlb.uTlbPhysRev & (IEMTLBE_F_S1_ASID | IEMTLBE_F_S2_VMID)) )
        || (   (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
            &&    (pTlbe->fFlagsAndPhysRev & (fTlbeAcc | IEMTLBE_F_S2_VMID))
               == (pVCpu->iem.s.DataTlb.uTlbPhysRev & (IEMTLBE_F_S2_VMID)) ) )
    {
# ifdef IEM_WITH_TLB_STATISTICS
        pVCpu->iem.s.DataTlb.cTlbCoreHits++;
# endif

        /* Look up the physical page info if necessary. */
        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == (pVCpu->iem.s.DataTlb.uTlbPhysRev & IEMTLBE_F_PHYS_REV))
# ifdef IN_RING3
            pbMem = pTlbe->pbMappingR3;
# else
            pbMem = NULL;
# endif
        else
        {
            if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
            { /* likely */ }
            else
                iemTlbInvalidateAllPhysicalSlow(pVCpu);
            pTlbe->pbMappingR3       = NULL;
            pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
            int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
                                                &pbMem, &pTlbe->fFlagsAndPhysRev);
            AssertRCReturn(rc, rc);
# ifdef IN_RING3
            pTlbe->pbMappingR3 = pbMem;
# endif
        }
    }
    else
    {
        pVCpu->iem.s.DataTlb.cTlbCoreMisses++;

        /* This page table walking will set A bits as required by the access while performing the walk.
           ASSUMES these are set when the address is translated rather than on commit... */
        /** @todo testcase: check when A bits are actually set by the CPU for code. */
        PGMPTWALKFAST WalkFast;
        int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, iemMemArmAccessToQPage(pVCpu, fAccess), &WalkFast);
        if (RT_SUCCESS(rc))
            Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
            /** @todo stage 2 exceptions. */
            return iemRaiseDataAbortFromWalk(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc, &WalkFast);
        }

        uint32_t fDataBps;
        if (   RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
            || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
        {
            /** @todo arm: check out global pages on arm */
            if (   (WalkFast.fEffective & PGM_PTATTRS_NG_MASK)
                || !fPrivileged) /* optimization: Only use global pages for privileged accesses. */
            {
                pTlbe--;
                Assert(!IEMTLBE_IS_GLOBAL(pTlbe));
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
                /// @todo arm: large/giant page fun
                //if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                //    iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                //else
                ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
# endif
            }
            else
            {
                Assert(IEMTLBE_IS_GLOBAL(pTlbe));
                pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
                /// @todo arm: large/giant page fun
                //if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                //    iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                //else
                ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
# endif
            }
        }
        else if (fDataBps == 1)
        {
            /* There are one or more data breakpoints in the current page, so we use a dummy
               TLBE to force all accesses to the page with the data access breakpoint armed
               on it to pass thru here. */
            pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
            pTlbe->uTag = uTagNoRev;
        }
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x PC=%016RX64\n",
                                  fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.Pc.u64));
            return iemRaiseDebugDataAccessOrInvokeDbgf(pVCpu, fDataBps, GCPtrMem, cbMem, fAccess);
        }
        pTlbe->fFlagsAndPhysRev = iemMemArmPtAttrsToTlbeFlags(WalkFast.fEffective)
                                | (  pVCpu->iem.s.DataTlb.uTlbPhysRev
                                   & (!IEMTLBE_IS_GLOBAL(pTlbe) ? IEMTLBE_F_S2_VMID | IEMTLBE_F_S1_ASID : IEMTLBE_F_S2_VMID) );

        /** @todo PGM_WALKINFO_BIG_PAGE++ */
        RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(pVCpu->iem.s.fExec);
        pTlbe->GCPhys      = GCPhysPg;
        pTlbe->pbMappingR3 = NULL;
        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_EFF_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));

        if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
        {
            if (!IEMTLBE_IS_GLOBAL(pTlbe))
                IEMTLBTRACE_LOAD(       pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
            else
                IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
        }

        /* Resolve the physical address. */
        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
        rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
                                        &pbMem, &pTlbe->fFlagsAndPhysRev);
        AssertRCReturn(rc, rc);
# ifdef IN_RING3
        pTlbe->pbMappingR3 = pbMem;
# endif
    }

    /*
     * Check the physical page level access and mapping.
     */
    if (   !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
        || !(pTlbe->fFlagsAndPhysRev & (  (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
                                        | (fAccess & IEM_ACCESS_TYPE_READ  ? IEMTLBE_F_PG_NO_READ  : 0))) )
    { /* probably likely */ }
    else
        return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
                                         pTlbe->GCPhys | (GCPtrMem & IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(pVCpu->iem.s.fExec)),
                                         fAccess,
                                         pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
                                         : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ  ? VERR_PGM_PHYS_TLB_CATCH_ALL
                                         : VERR_PGM_PHYS_TLB_CATCH_WRITE);
    Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */

    if (pbMem)
    {
        Assert(!((uintptr_t)pbMem & IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(pVCpu->iem.s.fExec)));
        pbMem    = pbMem + (GCPtrMem & IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(pVCpu->iem.s.fExec));
        fAccess |= IEM_ACCESS_NOT_LOCKED;
    }
    else
    {
        Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
        RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(pVCpu->iem.s.fExec));
        VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
        if (rcStrict != VINF_SUCCESS)
            return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
    }

    void * const pvMem = pbMem;

    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(pVCpu->iem.s.fExec)), cbMem));
    if (fAccess & IEM_ACCESS_TYPE_READ)
        Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(pVCpu->iem.s.fExec)), cbMem));

#else  /* !IEM_WITH_DATA_TLB */

    RTGCPHYS GCPhysFirst;
    VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
    if (fAccess & IEM_ACCESS_TYPE_READ)
        Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));

    void *pvMem;
    rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    if (rcStrict != VINF_SUCCESS)
        return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);

#endif /* !IEM_WITH_DATA_TLB */

    /*
     * Fill in the mapping table entry.
     */
    pVCpu->iem.s.aMemMappings[iMemMap].pv      = pvMem;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings += 1;

    *ppvMem = pvMem;
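    /* Unmap token layout (see the AssertCompiles below): bits 2:0 hold the mapping index,
       bits 7:4 the IEM_ACCESS_TYPE_XXX bits, and bit 3 (0x08) is always set, presumably so
       that a valid token is never zero. */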
    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
    AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);

    return VINF_SUCCESS;
}
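
/*
 * A minimal usage sketch (illustrative only; the fAccess and alignment arguments here
 * are just examples, not requirements from this function):
 *
 *      uint32_t const *pu32Src = NULL;
 *      uint8_t         bUnmapInfo;
 *      VBOXSTRICTRC    rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src),
 *                                           GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const uValue = *pu32Src;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *      }
 */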


/**
 * Maps the specified guest memory for the given kind of access, longjmp on
 * error.
 *
 * This may be using bounce buffering of the memory if it's crossing a page
 * boundary or if there is an access handler installed for any of it.  Because
 * of lock prefix guarantees, we're in for some extra clutter when this
 * happens.
 *
 * This may raise a \#GP, \#SS, \#PF or \#AC.
 *
 * @returns Pointer to the mapped memory.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Where to return unmap info to be passed to
 *                      iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
 *                      iemMemCommitAndUnmapWoSafeJmp,
 *                      iemMemCommitAndUnmapRoSafeJmp,
 *                      iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
 *                      when done.
 * @param   cbMem       The number of bytes to map.  This is usually 1,
 *                      2, 4, 6, 8, 12, 16, 32 or 512.  When used by
 *                      string operations it can be up to a page.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   fAccess     How the memory is being accessed.  The
 *                      IEM_ACCESS_TYPE_XXX part is used to figure out how to
 *                      map the memory, while the IEM_ACCESS_WHAT_XXX part is
 *                      used when raising exceptions.  The IEM_ACCESS_ATOMIC and
 *                      IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
 *                      set.
 * @param   uAlignCtl   Alignment control:
 *                          - Bits 15:0 is the alignment mask.
 *                          - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
 *                            IEM_MEMMAP_F_ALIGN_SSE, and
 *                            IEM_MEMMAP_F_ALIGN_GP_OR_AC.
 *                      Pass zero to skip alignment.
 * @tparam  a_fSafeCall Whether this is a call from a "safe" fallback function in
 *                      IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
 *                      needs counting as such in the statistics.
 */
template<bool a_fSafeCall = false>
static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, RTGCPTR GCPtrMem,
                          uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
#if 1 /** @todo redo this accordingly when iemMemMap() has been fully debugged. */
    void *pvMem = NULL;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess, uAlignCtl);
    if (rcStrict == VINF_SUCCESS)
    { /* likely */ }
    else
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    return pvMem;

#else /* later */

    /*
     * Check the input, check segment access and adjust address
     * with segment base.
     */
    Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
    Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));

    VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
    if (rcStrict == VINF_SUCCESS) { /*likely*/ }
    else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));

    /*
     * Alignment check.
     */
    if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
    { /* likelyish */ }
    else
    {
        /* Misaligned access. */
        if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
        {
            if (   !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
                || (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
                    && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
            {
                AssertCompile(X86_CR0_AM == X86_EFL_AC);

                if (iemMemAreAlignmentChecksEnabled(pVCpu))
                    iemRaiseAlignmentCheckExceptionJmp(pVCpu);
            }
            else if (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
                     && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
                     /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
                      * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++.  Using 4 for now as
                      * that's what FXSAVE does on a 10980xe. */
                     && iemMemAreAlignmentChecksEnabled(pVCpu))
                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
            else
                iemRaiseGeneralProtectionFault0Jmp(pVCpu);
        }

#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
        /* If the access is atomic there are host platform alignment restrictions
           we need to conform with. */
        if (   !(fAccess & IEM_ACCESS_ATOMIC)
# if defined(RT_ARCH_AMD64)
            || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
# elif defined(RT_ARCH_ARM64)
            || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
# else
#  error port me
# endif
           )
        { /* okay */ }
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
            pVCpu->iem.s.cMisalignedAtomics += 1;
            IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
        }
#endif
    }

    /*
     * Figure out which mapping entry to use.
     */
    unsigned iMemMap = pVCpu->iem.s.iNextMapping;
    if (   iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
        || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
    {
        iMemMap = iemMemMapFindFree(pVCpu);
        AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
                            ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
                             pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
                             pVCpu->iem.s.aMemMappings[2].fAccess),
                            IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
    }

    /*
     * Crossing a page boundary?
     */
    if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
    { /* No (likely). */ }
    else
    {
        void *pvMem;
        rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
        if (rcStrict == VINF_SUCCESS)
            return pvMem;
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    }

#ifdef IEM_WITH_DATA_TLB
    Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));

    /*
     * Get the TLB entry for this page checking that it has the A & D bits
     * set as per fAccess flags.
     */
    /** @todo make the caller pass these in with fAccess. */
    uint64_t const fNoUser         = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
                                   ? IEMTLBE_F_PT_NO_USER : 0;
    uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
                                   ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
                                     | (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
                                        || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
                                         ? IEMTLBE_F_PT_NO_WRITE : 0)
                                   : 0;
    uint64_t const fNoRead         = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
    uint64_t const uTagNoRev       = IEMTLB_CALC_TAG_NO_REV(pVCpu, GCPtrMem);
    PIEMTLBENTRY   pTlbe           = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
    uint64_t const fTlbeAD         = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
    if (   (   pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
            && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
        || (   (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
            && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
    {
# ifdef IEM_WITH_TLB_STATISTICS
        if (a_fSafeCall)
            pVCpu->iem.s.DataTlb.cTlbSafeHits++;
        else
            pVCpu->iem.s.DataTlb.cTlbCoreHits++;
# endif
    }
    else
    {
        if (a_fSafeCall)
            pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
        else
            pVCpu->iem.s.DataTlb.cTlbCoreMisses++;

        /* This page table walking will set A and D bits as required by the
           access while performing the walk.
           ASSUMES these are set when the address is translated rather than on commit... */
        /** @todo testcase: check when A and D bits are actually set by the CPU. */
        PGMPTWALKFAST WalkFast;
        AssertCompile(IEM_ACCESS_TYPE_READ  == PGMQPAGE_F_READ);
        AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
        AssertCompile(IEM_ACCESS_TYPE_EXEC  == PGMQPAGE_F_EXECUTE);
        AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
        uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
                        | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
        if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
            fQPage |= PGMQPAGE_F_USER_MODE;
        int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
        if (RT_SUCCESS(rc))
            Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
# endif
            iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
        }

        uint32_t fDataBps;
        if (   RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
            || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
        {
            if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
                || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
            {
                pTlbe--;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
                if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                    iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                else
                    ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
# endif
            }
            else
            {
                if (a_fSafeCall)
                    pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
                else
                    pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
                if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                    iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                else
                    ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
# endif
            }
        }
        else
        {
            /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
               to the page with the data access breakpoint armed on it to pass thru here. */
            if (fDataBps > 1)
                LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
                                      a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
            pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
            pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
            pTlbe->uTag = uTagNoRev;
        }
        pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
                                | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
        RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
        pTlbe->GCPhys      = GCPhysPg;
        pTlbe->pbMappingR3 = NULL;
        Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
        Assert(   !(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
               || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);
        Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));

        if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
        {
            if (!IEMTLBE_IS_GLOBAL(pTlbe))
                IEMTLBTRACE_LOAD(       pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
            else
                IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
        }

        /* Resolve the physical address. */
        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
        uint8_t *pbMemFullLoad = NULL;
        rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
                                        &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
        AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
# ifdef IN_RING3
        pTlbe->pbMappingR3 = pbMemFullLoad;
# endif
    }

    /*
     * Check the flags and physical revision.
     * Note! This will revalidate the uTlbPhysRev after a full load.  This is
     *       just to keep the code structure simple (i.e. avoid gotos or similar).
     */
    uint8_t *pbMem;
    if (   (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
        == pVCpu->iem.s.DataTlb.uTlbPhysRev)
# ifdef IN_RING3
        pbMem = pTlbe->pbMappingR3;
# else
        pbMem = NULL;
# endif
    else
    {
        Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));

        /*
         * Okay, something isn't quite right or needs refreshing.
         */
        /* Write to read only memory? */
        if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            /** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
             *        to trigger an \#PG or a VM nested paging exit here yet! */
            if (Walk.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
# endif
            iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
        }

        /* Kernel memory accessed by userland? */
        if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            /** @todo TLB: See above. */
            if (Walk.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
# endif
            iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
        }

        /*
         * Check if the physical page info needs updating.
         */
        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
# ifdef IN_RING3
            pbMem = pTlbe->pbMappingR3;
# else
            pbMem = NULL;
# endif
        else
        {
            pTlbe->pbMappingR3       = NULL;
            pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
            pbMem = NULL;
            int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
                                                &pbMem, &pTlbe->fFlagsAndPhysRev);
            AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
# ifdef IN_RING3
            pTlbe->pbMappingR3 = pbMem;
# endif
        }

        /*
         * Check the physical page level access and mapping.
         */
        if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
        { /* probably likely */ }
        else
        {
            rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
                                                 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
                                                 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
                                                 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ  ? VERR_PGM_PHYS_TLB_CATCH_ALL
                                                 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
            if (rcStrict == VINF_SUCCESS)
                return pbMem;
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }
    Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */

    if (pbMem)
    {
        Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
        pbMem    = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
        fAccess |= IEM_ACCESS_NOT_LOCKED;
    }
    else
    {
        Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
        RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
        rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
        if (rcStrict == VINF_SUCCESS)
        {
            *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
            return pbMem;
        }
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    }

    void * const pvMem = pbMem;

    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
    if (fAccess & IEM_ACCESS_TYPE_READ)
        Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));

#else  /* !IEM_WITH_DATA_TLB */


    RTGCPHYS GCPhysFirst;
    rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
    if (rcStrict == VINF_SUCCESS) { /*likely*/ }
    else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));

    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
    if (fAccess & IEM_ACCESS_TYPE_READ)
        Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));

    void *pvMem;
    rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    if (rcStrict == VINF_SUCCESS)
    { /* likely */ }
    else
    {
        rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
        if (rcStrict == VINF_SUCCESS)
            return pvMem;
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    }

#endif /* !IEM_WITH_DATA_TLB */

    /*
     * Fill in the mapping table entry.
     */
    pVCpu->iem.s.aMemMappings[iMemMap].pv      = pvMem;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return pvMem;
#endif /* later */
}
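
/*
 * Illustrative only: a caller of the longjmp variants gets a non-NULL pointer back and
 * unwinds via longjmp on failure, e.g. (the fAccess and alignment arguments are just
 * examples):
 *
 *      uint8_t   bUnmapInfo;
 *      uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint64_t),
 *                                                   GCPtrMem, IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1);
 *      *pu64Dst = u64Value;
 *      iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 */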
1014 |
|
---|
1015 |
|
---|
1016 | /** @see iemMemMapJmp */
|
---|
1017 | static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, RTGCPTR GCPtrMem,
|
---|
1018 | uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
|
---|
1019 | {
|
---|
1020 | return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, GCPtrMem, fAccess, uAlignCtl);
|
---|
1021 | }
|
---|
1022 |
|
---|
1023 |
|
---|
1024 | #if 0 /** @todo ARM: more memory stuff... */
|
---|
1025 |
|
---|
1026 | /*
|
---|
1027 | * Instantiate R/W templates.
|
---|
1028 | */
|
---|
1029 | #define TMPL_MEM_WITH_STACK
|
---|
1030 |
|
---|
1031 | #define TMPL_MEM_TYPE uint8_t
|
---|
1032 | #define TMPL_MEM_FN_SUFF U8
|
---|
1033 | #define TMPL_MEM_FMT_TYPE "%#04x"
|
---|
1034 | #define TMPL_MEM_FMT_DESC "byte"
|
---|
1035 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1036 |
|
---|
1037 | #define TMPL_MEM_TYPE uint16_t
|
---|
1038 | #define TMPL_MEM_FN_SUFF U16
|
---|
1039 | #define TMPL_MEM_FMT_TYPE "%#06x"
|
---|
1040 | #define TMPL_MEM_FMT_DESC "word"
|
---|
1041 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1042 |
|
---|
1043 | #define TMPL_WITH_PUSH_SREG
|
---|
1044 | #define TMPL_MEM_TYPE uint32_t
|
---|
1045 | #define TMPL_MEM_FN_SUFF U32
|
---|
1046 | #define TMPL_MEM_FMT_TYPE "%#010x"
|
---|
1047 | #define TMPL_MEM_FMT_DESC "dword"
|
---|
1048 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1049 | #undef TMPL_WITH_PUSH_SREG
|
---|
1050 |
|
---|
1051 | #define TMPL_MEM_TYPE uint64_t
|
---|
1052 | #define TMPL_MEM_FN_SUFF U64
|
---|
1053 | #define TMPL_MEM_FMT_TYPE "%#018RX64"
|
---|
1054 | #define TMPL_MEM_FMT_DESC "qword"
|
---|
1055 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1056 |
|
---|
1057 | #undef TMPL_MEM_WITH_STACK
|
---|
1058 |
|
---|
1059 | #define TMPL_MEM_TYPE uint32_t
|
---|
1060 | #define TMPL_MEM_TYPE_ALIGN 0
|
---|
1061 | #define TMPL_MEM_FN_SUFF U32NoAc
|
---|
1062 | #define TMPL_MEM_FMT_TYPE "%#010x"
|
---|
1063 | #define TMPL_MEM_FMT_DESC "dword"
|
---|
1064 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1065 | #undef TMPL_WITH_PUSH_SREG
|
---|
1066 |
|
---|
1067 | #define TMPL_MEM_TYPE uint64_t
|
---|
1068 | #define TMPL_MEM_TYPE_ALIGN 0
|
---|
1069 | #define TMPL_MEM_FN_SUFF U64NoAc
|
---|
1070 | #define TMPL_MEM_FMT_TYPE "%#018RX64"
|
---|
1071 | #define TMPL_MEM_FMT_DESC "qword"
|
---|
1072 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1073 |
|
---|
1074 | #define TMPL_MEM_TYPE uint64_t
|
---|
1075 | #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
|
---|
1076 | #define TMPL_MEM_FN_SUFF U64AlignedU128
|
---|
1077 | #define TMPL_MEM_FMT_TYPE "%#018RX64"
|
---|
1078 | #define TMPL_MEM_FMT_DESC "qword"
|
---|
1079 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1080 |
|
---|
1081 | /* See IEMAllMemRWTmplInline.cpp.h */
|
---|
1082 | #define TMPL_MEM_BY_REF
|
---|
1083 |
|
---|
1084 | #define TMPL_MEM_TYPE RTFLOAT80U
|
---|
1085 | #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
|
---|
1086 | #define TMPL_MEM_FN_SUFF R80
|
---|
1087 | #define TMPL_MEM_FMT_TYPE "%.10Rhxs"
|
---|
1088 | #define TMPL_MEM_FMT_DESC "tword"
|
---|
1089 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1090 |
|
---|
1091 | #define TMPL_MEM_TYPE RTPBCD80U
|
---|
1092 | #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
|
---|
1093 | #define TMPL_MEM_FN_SUFF D80
|
---|
1094 | #define TMPL_MEM_FMT_TYPE "%.10Rhxs"
|
---|
1095 | #define TMPL_MEM_FMT_DESC "tword"
|
---|
1096 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1097 |
|
---|
1098 | #define TMPL_MEM_TYPE RTUINT128U
|
---|
1099 | #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
|
---|
1100 | #define TMPL_MEM_FN_SUFF U128
|
---|
1101 | #define TMPL_MEM_FMT_TYPE "%.16Rhxs"
|
---|
1102 | #define TMPL_MEM_FMT_DESC "dqword"
|
---|
1103 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1104 |
|
---|
1105 | #define TMPL_MEM_TYPE RTUINT128U
|
---|
1106 | #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
|
---|
1107 | #define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
|
---|
1108 | #define TMPL_MEM_FN_SUFF U128AlignedSse
|
---|
1109 | #define TMPL_MEM_FMT_TYPE "%.16Rhxs"
|
---|
1110 | #define TMPL_MEM_FMT_DESC "dqword"
|
---|
1111 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1112 |
|
---|
1113 | #define TMPL_MEM_TYPE RTUINT128U
|
---|
1114 | #define TMPL_MEM_TYPE_ALIGN 0
|
---|
1115 | #define TMPL_MEM_FN_SUFF U128NoAc
|
---|
1116 | #define TMPL_MEM_FMT_TYPE "%.16Rhxs"
|
---|
1117 | #define TMPL_MEM_FMT_DESC "dqword"
|
---|
1118 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1119 |
|
---|
1120 | #define TMPL_MEM_TYPE RTUINT256U
|
---|
1121 | #define TMPL_MEM_TYPE_ALIGN 0
|
---|
1122 | #define TMPL_MEM_FN_SUFF U256NoAc
|
---|
1123 | #define TMPL_MEM_FMT_TYPE "%.32Rhxs"
|
---|
1124 | #define TMPL_MEM_FMT_DESC "qqword"
|
---|
1125 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|
1126 |
|
---|
1127 | #define TMPL_MEM_TYPE RTUINT256U
|
---|
1128 | #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
|
---|
1129 | #define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
|
---|
1130 | #define TMPL_MEM_FN_SUFF U256AlignedAvx
|
---|
1131 | #define TMPL_MEM_FMT_TYPE "%.32Rhxs"
|
---|
1132 | #define TMPL_MEM_FMT_DESC "qqword"
|
---|
1133 | #include "IEMAllMemRWTmpl-x86.cpp.h"
|
---|


/**
 * Fetches a data dword and zero extends it to a qword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64Dst     Where to return the qword.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t         bUnmapInfo;
    uint32_t const *pu32Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = *pu32Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
    }
    return rc;
}


#ifdef SOME_UNUSED_FUNCTION
/**
 * Fetches a data dword and sign extends it to a qword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64Dst     Where to return the sign extended value.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t        bUnmapInfo;
    int32_t const *pi32Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = *pi32Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
    }
#ifdef __GNUC__ /* warning: GCC may be a royal pain */
    else
        *pu64Dst = 0;
#endif
    return rc;
}
#endif


/**
 * Fetches a descriptor register (lgdt, lidt).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pcbLimit    Where to return the limit.
 * @param   pGCPtrBase  Where to return the base.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   enmOpSize   The effective operand size.
 */
VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
                                 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
{
    /*
     * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
     * little special:
     * - The two reads are done separately.
     * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
     * - We suspect the 386 to actually commit the limit before the base in
     *   some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One).  We
     *   don't try to emulate this eccentric behavior, because it's not well
     *   enough understood and rather hard to trigger.
     * - The 486 seems to do a dword limit read when the operand size is 32-bit.
     */
    VBOXSTRICTRC rcStrict;
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
    }
    else
    {
        uint32_t uTmp = 0; /* (Visual C++ may otherwise think it's used uninitialized.) */
        if (enmOpSize == IEMMODE_32BIT)
        {
            if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
            {
                rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
                if (rcStrict == VINF_SUCCESS)
                    rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
            }
            else
            {
                rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
                if (rcStrict == VINF_SUCCESS)
                {
                    *pcbLimit = (uint16_t)uTmp;
                    rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
                }
            }
            if (rcStrict == VINF_SUCCESS)
                *pGCPtrBase = uTmp;
        }
        else
        {
            rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
            if (rcStrict == VINF_SUCCESS)
            {
                rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
                if (rcStrict == VINF_SUCCESS)
                    *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
            }
        }
    }
    return rcStrict;
}
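
/*
 * Illustrative operand layout for the lgdt/lidt reads above (a clarifying
 * sketch derived from the code, not an additional specification):
 *
 *     offset 0:  16-bit limit
 *     offset 2:  base address - 8 bytes in 64-bit code, 4 bytes otherwise,
 *                of which only the low 24 bits are used for the 16-bit
 *                operand size.
 */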


/**
 * Stores a data dqword, SSE aligned.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   u128Value   The value to store.
 */
VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t     bUnmapInfo;
    PRTUINT128U pu128Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
                                (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    if (rc == VINF_SUCCESS)
    {
        pu128Dst->au64[0] = u128Value.au64[0];
        pu128Dst->au64[1] = u128Value.au64[1];
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
    }
    return rc;
}


/**
 * Stores a data dqword, SSE aligned, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   u128Value   The value to store.
 */
void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                      RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
{
    /* The lazy approach for now... */
    uint8_t     bUnmapInfo;
    PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
                                                     (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    pu128Dst->au64[0] = u128Value.au64[0];
    pu128Dst->au64[1] = u128Value.au64[1];
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
}


/**
 * Stores a data qqword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   pu256Value  Pointer to the value to store.
 */
VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t     bUnmapInfo;
    PRTUINT256U pu256Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
    if (rc == VINF_SUCCESS)
    {
        pu256Dst->au64[0] = pu256Value->au64[0];
        pu256Dst->au64[1] = pu256Value->au64[1];
        pu256Dst->au64[2] = pu256Value->au64[2];
        pu256Dst->au64[3] = pu256Value->au64[3];
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    }
    return rc;
}


/**
 * Stores a data qqword, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   pu256Value  Pointer to the value to store.
 */
void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
{
    /* The lazy approach for now... */
    uint8_t     bUnmapInfo;
    PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
                                                     IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
    pu256Dst->au64[0] = pu256Value->au64[0];
    pu256Dst->au64[1] = pu256Value->au64[1];
    pu256Dst->au64[2] = pu256Value->au64[2];
    pu256Dst->au64[3] = pu256Value->au64[3];
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
}


/**
 * Stores a descriptor register (sgdt, sidt).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbLimit     The limit.
 * @param   GCPtrBase   The base address.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /*
     * The SIDT and SGDT instructions actually store the data using two
     * independent writes (see bs3CpuBasic2_sidt_sgdt_One).  The instructions
     * do not respond to operand size prefixes.
     */
    VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
    if (rcStrict == VINF_SUCCESS)
    {
        if (IEM_IS_16BIT_CODE(pVCpu))
            rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
                                          IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
                                          ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
        else if (IEM_IS_32BIT_CODE(pVCpu))
            rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
        else
            rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
    }
    return rcStrict;
}


/**
 * Begin a special stack push (used by interrupts, exceptions and such).
 *
 * This will raise \#SS or \#PF if appropriate.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbMem       The number of bytes to push onto the stack.
 * @param   cbAlign     The alignment mask (7, 3, 1).
 * @param   ppvMem      Where to return the pointer to the stack memory.
 *                      As with the other memory functions this could be
 *                      direct access or bounce buffered access, so don't
 *                      commit any register state until the commit call
 *                      succeeds.
 * @param   pbUnmapInfo Where to store unmap info for
 *                      iemMemStackPushCommitSpecial.
 * @param   puNewRsp    Where to return the new RSP value.  This must be
 *                      passed unchanged to
 *                      iemMemStackPushCommitSpecial().
 */
VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                         void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
{
    Assert(cbMem < UINT8_MAX);
    RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
    return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
}


/**
 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
 *
 * This will update the rSP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemStackPushBeginSpecial.
 * @param   uNewRsp     The new RSP value returned by
 *                      iemMemStackPushBeginSpecial().
 */
VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;
    return rcStrict;
}
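
/*
 * Illustrative caller sketch (an assumption for clarity; uValueToPush is a
 * hypothetical name, not taken from any real caller).  The special push is a
 * two-phase protocol: map the stack memory, write to it, then commit, which
 * is also what updates RSP.  The 7 passed as cbAlign is the qword alignment
 * mask from the doc comment above:
 *
 *     uint64_t     uNewRsp;
 *     uint8_t      bUnmapInfo;
 *     void        *pvStack;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint64_t), 7,
 *                                                         &pvStack, &bUnmapInfo, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *(uint64_t *)pvStack = uValueToPush;  // write via the mapping; RSP is untouched so far
 *         rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
 *     }
 */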


/**
 * Begin a special stack pop (used by iret, retf and such).
 *
 * This will raise \#SS or \#PF if appropriate.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbMem       The number of bytes to pop from the stack.
 * @param   cbAlign     The alignment mask (7, 3, 1).
 * @param   ppvMem      Where to return the pointer to the stack memory.
 * @param   pbUnmapInfo Where to store unmap info for
 *                      iemMemStackPopDoneSpecial.
 * @param   puNewRsp    Where to return the new RSP value.  This must be
 *                      assigned to CPUMCTX::rsp manually some time
 *                      after iemMemStackPopDoneSpecial() has been
 *                      called.
 */
VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                        void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
{
    Assert(cbMem < UINT8_MAX);
    RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
    return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
}


/**
 * Continue a special stack pop (used by iret and retf), for the purpose of
 * retrieving a new stack pointer.
 *
 * This will raise \#SS or \#PF if appropriate.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   off         Offset from the top of the stack.  This is zero
 *                      except in the retf case.
 * @param   cbMem       The number of bytes to pop from the stack.
 * @param   ppvMem      Where to return the pointer to the stack memory.
 * @param   pbUnmapInfo Where to store unmap info for
 *                      iemMemStackPopDoneSpecial.
 * @param   uCurNewRsp  The current uncommitted RSP value.  (No need to
 *                      return this because all use of this function is
 *                      to retrieve a new value and anything we return
 *                      here would be discarded.)
 */
VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
                                           void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
{
    Assert(cbMem < UINT8_MAX);

    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
    RTGCPTR GCPtrTop;
    if (IEM_IS_64BIT_CODE(pVCpu))
        GCPtrTop = uCurNewRsp;
    else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
        GCPtrTop = (uint32_t)uCurNewRsp;
    else
        GCPtrTop = (uint16_t)uCurNewRsp;

    return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
                     0 /* checked in iemMemStackPopBeginSpecial */);
}


/**
 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
 * iemMemStackPopContinueSpecial).
 *
 * The caller will manually commit the rSP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap information returned by
 *                      iemMemStackPopBeginSpecial() or
 *                      iemMemStackPopContinueSpecial().
 */
VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
}
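
/*
 * Illustrative caller sketch (an assumption for clarity, not lifted from a
 * real caller).  The special pop is begin -> read -> done, and the caller
 * commits RSP itself afterwards, as the doc comments above require:
 *
 *     uint64_t     uNewRsp;
 *     uint8_t      bUnmapInfo;
 *     void const  *pvStack;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t), 7,
 *                                                        &pvStack, &bUnmapInfo, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint64_t const uPopped = *(uint64_t const *)pvStack;
 *         rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *         if (rcStrict == VINF_SUCCESS)
 *             pVCpu->cpum.GstCtx.rsp = uNewRsp;  // manual RSP commit by the caller
 *     }
 */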


/**
 * Fetches a system table byte.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbDst       Where to return the byte.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t        bUnmapInfo;
    uint8_t const *pbSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pbDst = *pbSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}


/**
 * Fetches a system table word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu16Dst     Where to return the word.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t         bUnmapInfo;
    uint16_t const *pu16Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = *pu16Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}


/**
 * Fetches a system table dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu32Dst     Where to return the dword.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t         bUnmapInfo;
    uint32_t const *pu32Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = *pu32Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}


/**
 * Fetches a system table qword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64Dst     Where to return the qword.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t         bUnmapInfo;
    uint64_t const *pu64Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = *pu64Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}

/**
 * Fetches a descriptor table entry with a caller-specified error code.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pDesc       Where to return the descriptor table entry.
 * @param   uSel        The selector whose table entry to fetch.
 * @param   uXcpt       The exception to raise on table lookup error.
 * @param   uErrorCode  The error code associated with the exception.
 */
VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
                                       uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
{
    AssertPtr(pDesc);
    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);

    /** @todo did the 286 require all 8 bytes to be accessible? */
    /*
     * Get the selector table base and check bounds.
     */
    RTGCPTR GCPtrBase;
    if (uSel & X86_SEL_LDT)
    {
        if (   !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
            || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
        {
            LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
                                  uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
            return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                     uErrorCode, 0);
        }

        Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
        GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
    }
    else
    {
        if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
        {
            LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
            return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                     uErrorCode, 0);
        }
        GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
    }

    /*
     * Read the legacy descriptor and maybe the long mode extensions if
     * required.
     */
    VBOXSTRICTRC rcStrict;
    if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
        rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
    else
    {
        rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
        if (rcStrict == VINF_SUCCESS)
            pDesc->Legacy.au16[3] = 0;
        else
            return rcStrict;
    }

    if (rcStrict == VINF_SUCCESS)
    {
        if (   !IEM_IS_LONG_MODE(pVCpu)
            || pDesc->Legacy.Gen.u1DescType)
            pDesc->Long.au64[1] = 0;
        else if (   (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
                 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
            rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
            /** @todo is this the right exception? */
            return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
        }
    }
    return rcStrict;
}


/**
 * Fetches a descriptor table entry.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pDesc       Where to return the descriptor table entry.
 * @param   uSel        The selector whose table entry to fetch.
 * @param   uXcpt       The exception to raise on table lookup error.
 */
VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
{
    return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
}
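
/*
 * Illustrative caller sketch (an assumption, not copied from a real caller):
 * fetch a selector's descriptor and, for code/data descriptors that are not
 * yet marked accessed, set the accessed bit afterwards:
 *
 *     IEMSELDESC   Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     if (   Desc.Legacy.Gen.u1DescType
 *         && !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *         rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 */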


/**
 * Marks the selector descriptor as accessed (only non-system descriptors).
 *
 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
 * will therefore skip the limit checks.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   uSel        The selector.
 */
VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
{
    /*
     * Get the selector table base and calculate the entry address.
     */
    RTGCPTR GCPtr = uSel & X86_SEL_LDT
                  ? pVCpu->cpum.GstCtx.ldtr.u64Base
                  : pVCpu->cpum.GstCtx.gdtr.pGdt;
    GCPtr += uSel & X86_SEL_MASK;

    /*
     * ASMAtomicBitSet will assert if the address is misaligned, so do some
     * ugly stuff to avoid this.  This also makes sure the access is atomic
     * and more or less removes any question about 8-bit vs 32-bit accesses.
     */
    VBOXSTRICTRC       rcStrict;
    uint8_t            bUnmapInfo;
    uint32_t volatile *pu32;
    if ((GCPtr & 3) == 0)
    {
        /* The normal case, map the 32-bit bits around the accessed bit (40). */
        GCPtr += 2 + 2;
        rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
    }
    else
    {
        /* The misaligned GDT/LDT case, map the whole thing. */
        rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        switch ((uintptr_t)pu32 & 3)
        {
            case 0: ASMAtomicBitSet(pu32, 40 + 0 -  0); break;
            case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
            case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
            case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 -  8); break;
        }
    }

    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
}

/** @} */

#endif /* work in progress */
