VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllMem-x86.cpp

Last change on this file was 108791, checked in by vboxsync, 11 days ago

VMM/IEM: More ARM target work. jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 80.5 KB
1/* $Id: IEMAllMem-x86.cpp 108791 2025-03-28 21:58:31Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - x86 target, memory.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM_MEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#ifdef IN_RING0
35# define VBOX_VMM_TARGET_X86
36#endif
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/pgm.h>
40#include <VBox/vmm/dbgf.h>
41#include "IEMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/log.h>
44#include <VBox/err.h>
45#include <iprt/assert.h>
46#include <iprt/string.h>
47#include <iprt/x86.h>
48
49#include "IEMInline.h"
50#include "IEMInline-x86.h"
51#include "IEMInlineMem-x86.h"
52#include "IEMAllTlbInline-x86.h"
53
54
55/** @name Memory access.
56 *
57 * @{
58 */
59
60/**
61 * Applies the segment limit, base and attributes.
62 *
63 * This may raise a \#GP or \#SS.
64 *
65 * @returns VBox strict status code.
66 *
67 * @param pVCpu The cross context virtual CPU structure of the calling thread.
68 * @param fAccess The kind of access which is being performed.
69 * @param iSegReg The index of the segment register to apply.
70 * This is UINT8_MAX if none (for IDT, GDT, LDT,
71 * TSS, ++).
72 * @param cbMem The access size.
73 * @param pGCPtrMem Pointer to the guest memory address to apply
74 * segmentation to. Input and output parameter.
75 */
76VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
77{
78 if (iSegReg == UINT8_MAX)
79 return VINF_SUCCESS;
80
81 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
82 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
83 switch (IEM_GET_CPU_MODE(pVCpu))
84 {
85 case IEMMODE_16BIT:
86 case IEMMODE_32BIT:
87 {
88 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
89 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
90
91 if ( pSel->Attr.n.u1Present
92 && !pSel->Attr.n.u1Unusable)
93 {
94 Assert(pSel->Attr.n.u1DescType);
95 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
96 {
97 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
98 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
99 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
100
101 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
102 {
103 /** @todo CPL check. */
104 }
105
106 /*
107 * There are two kinds of data selectors, normal and expand down.
108 */
109 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
110 {
111 if ( GCPtrFirst32 > pSel->u32Limit
112 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
113 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
114 }
115 else
116 {
117 /*
118 * The upper boundary is defined by the B bit, not the G bit!
119 */
120 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
121 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
122 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
123 }
124 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
125 }
126 else
127 {
128 /*
129 * Code selectors can usually be read through; writing is
130 * only permitted in real and V8086 mode.
131 */
132 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
133 || ( (fAccess & IEM_ACCESS_TYPE_READ)
134 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
135 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
136 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
137
138 if ( GCPtrFirst32 > pSel->u32Limit
139 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
140 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
141
142 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
143 {
144 /** @todo CPL check. */
145 }
146
147 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
148 }
149 }
150 else
151 return iemRaiseGeneralProtectionFault0(pVCpu);
152 return VINF_SUCCESS;
153 }
154
155 case IEMMODE_64BIT:
156 {
157 RTGCPTR GCPtrMem = *pGCPtrMem;
158 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
159 *pGCPtrMem = GCPtrMem + pSel->u64Base;
160
161 Assert(cbMem >= 1);
162 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
163 return VINF_SUCCESS;
164 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
165 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
166 return iemRaiseGeneralProtectionFault0(pVCpu);
167 }
168
169 default:
170 AssertFailedReturn(VERR_IEM_IPE_7);
171 }
172}
173
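/*
 * Illustrative sketch only: a caller typically applies segmentation to an
 * effective address before translating or mapping it.  X86_SREG_DS and the
 * GCPtrEff/GCPtrDisp names below are assumptions used purely for illustration;
 * real callers pass whatever segment and access kind the instruction uses.
 *
 * @code
 *      RTGCPTR GCPtrEff = GCPtrDisp; // effective address from the decoder (placeholder)
 *      VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS,
 *                                                 sizeof(uint32_t), &GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;          // #GP/#SS status from the helper propagates to the caller
 *      // GCPtrEff now has the segment base added and the limits checked.
 * @endcode
 */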
174
175/**
176 * Translates a virtual address to a physical address and checks if we
177 * can access the page as specified.
178 *
179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
180 * @param GCPtrMem The virtual address.
181 * @param cbAccess The access size, for raising \#PF correctly for
182 * FXSAVE and such.
183 * @param fAccess The intended access.
184 * @param pGCPhysMem Where to return the physical address.
185 */
186VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
187 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
188{
189 /** @todo Need a different PGM interface here. We're currently using
190 * generic / REM interfaces. this won't cut it for R0. */
191 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
192 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
193 * here. */
194 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
195 PGMPTWALKFAST WalkFast;
196 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
197 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
198 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
199 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
200 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
201 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
202 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
203 fQPage |= PGMQPAGE_F_USER_MODE;
204 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
205 if (RT_SUCCESS(rc))
206 {
207 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
208
209 /* If the page is writable and does not have the no-exec bit set, all
210 access is allowed. Otherwise we'll have to check more carefully... */
211 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
212 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
213 || (WalkFast.fEffective & X86_PTE_RW)
214 || ( ( IEM_GET_CPL(pVCpu) != 3
215 || (fAccess & IEM_ACCESS_WHAT_SYS))
216 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
217 && ( (WalkFast.fEffective & X86_PTE_US)
218 || IEM_GET_CPL(pVCpu) != 3
219 || (fAccess & IEM_ACCESS_WHAT_SYS) )
220 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
221 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
222 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
223 )
224 );
225
226 /* PGMGstQueryPageFast sets the A & D bits. */
227 /** @todo testcase: check when A and D bits are actually set by the CPU. */
228 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
229
230 *pGCPhysMem = WalkFast.GCPhys;
231 return VINF_SUCCESS;
232 }
233
234 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
235 /** @todo Check unassigned memory in unpaged mode. */
236#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
237 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
238 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
239#endif
240 *pGCPhysMem = NIL_RTGCPHYS;
241 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
242}
243
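/*
 * Illustrative sketch only: this is roughly how the non-TLB code paths in this
 * file use the helper above - translate the (already segmented) address, then
 * act on the physical address.  GCPtrEff is a placeholder name.
 *
 * @code
 *      RTGCPHYS GCPhys;
 *      VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, sizeof(uint32_t),
 *                                                                IEM_ACCESS_DATA_R, &GCPhys);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;          // typically a #PF encoded in the strict status
 *      // GCPhys can now be handed to iemMemPageMap or similar.
 * @endcode
 */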
244
245/**
246 * Finds a free memmap entry when using iNextMapping doesn't work.
247 *
248 * @returns Memory mapping index, 1024 on failure.
249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
250 */
251static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
252{
253 /*
254 * The easy case.
255 */
256 if (pVCpu->iem.s.cActiveMappings == 0)
257 {
258 pVCpu->iem.s.iNextMapping = 1;
259 return 0;
260 }
261
262 /* There should be enough mappings for all instructions. */
263 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
264
265 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
266 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
267 return i;
268
269 AssertFailedReturn(1024);
270}
271
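/*
 * Illustrative sketch only: iemMemMap and iemMemMapJmp below use this helper as
 * a fallback when the iNextMapping hint does not point at a free slot, roughly:
 *
 * @code
 *      unsigned iMemMap = pVCpu->iem.s.iNextMapping;
 *      if (   iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
 *          || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
 *          iMemMap = iemMemMapFindFree(pVCpu);   // 1024 signals that no slot is free
 * @endcode
 */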
272
273#ifdef IEM_WITH_DATA_TLB
274/**
275 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
276 * @todo duplicated
277 */
278DECL_FORCE_INLINE(uint32_t)
279iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
280{
281 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
282 if (fAccess & IEM_ACCESS_TYPE_WRITE)
283 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
284 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
285}
286#endif
287
288
289/**
290 * Maps the specified guest memory for the given kind of access.
291 *
292 * This may be using bounce buffering of the memory if it's crossing a page
293 * boundary or if there is an access handler installed for any of it. Because
294 * of lock prefix guarantees, we're in for some extra clutter when this
295 * happens.
296 *
297 * This may raise a \#GP, \#SS, \#PF or \#AC.
298 *
299 * @returns VBox strict status code.
300 *
301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
302 * @param ppvMem Where to return the pointer to the mapped memory.
303 * @param pbUnmapInfo Where to return unmap info to be passed to
304 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
305 * done.
306 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
307 * 8, 12, 16, 32 or 512. When used by string operations
308 * it can be up to a page.
309 * @param iSegReg The index of the segment register to use for this
310 * access. The base and limits are checked. Use UINT8_MAX
311 * to indicate that no segmentation is required (for IDT,
312 * GDT and LDT accesses).
313 * @param GCPtrMem The address of the guest memory.
314 * @param fAccess How the memory is being accessed. The
315 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
316 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
317 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
318 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
319 * set.
320 * @param uAlignCtl Alignment control:
321 * - Bits 15:0 is the alignment mask.
322 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
323 * IEM_MEMMAP_F_ALIGN_SSE, and
324 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
325 * Pass zero to skip alignment.
326 */
327VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
328 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
329{
330 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
331
332 /*
333 * Check the input and figure out which mapping entry to use.
334 */
335 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
336 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
337 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
338 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
339 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
340
341 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
342 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
343 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
344 {
345 iMemMap = iemMemMapFindFree(pVCpu);
346 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
347 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
348 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
349 pVCpu->iem.s.aMemMappings[2].fAccess),
350 VERR_IEM_IPE_9);
351 }
352
353 /*
354 * Map the memory, checking that we can actually access it. If something
355 * slightly complicated happens, fall back on bounce buffering.
356 */
357 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
358 if (rcStrict == VINF_SUCCESS)
359 { /* likely */ }
360 else
361 return rcStrict;
362
363 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
364 { /* likely */ }
365 else
366 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
367
368 /*
369 * Alignment check.
370 */
371 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
372 { /* likelyish */ }
373 else
374 {
375 /* Misaligned access. */
376 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
377 {
378 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
379 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
380 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
381 {
382 AssertCompile(X86_CR0_AM == X86_EFL_AC);
383
384 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
385 { /* likely */ }
386 else
387 return iemRaiseAlignmentCheckException(pVCpu);
388 }
389 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
390 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
391 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
392 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
393 * that's what FXSAVE does on a 10980xe. */
394 && iemMemAreAlignmentChecksEnabled(pVCpu))
395 return iemRaiseAlignmentCheckException(pVCpu);
396 else
397 return iemRaiseGeneralProtectionFault0(pVCpu);
398 }
399
400#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
401 /* If the access is atomic there are host platform alignment restrictions
402 we need to conform with. */
403 if ( !(fAccess & IEM_ACCESS_ATOMIC)
404# if defined(RT_ARCH_AMD64)
405 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
406# elif defined(RT_ARCH_ARM64)
407 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
408# else
409# error port me
410# endif
411 )
412 { /* okay */ }
413 else
414 {
415 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
416 pVCpu->iem.s.cMisalignedAtomics += 1;
417 return VINF_EM_EMULATE_SPLIT_LOCK;
418 }
419#endif
420 }
421
422#ifdef IEM_WITH_DATA_TLB
423 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
424
425 /*
426 * Get the TLB entry for this page and check PT flags.
427 *
428 * We reload the TLB entry if we need to set the dirty bit (accessed
429 * should in theory always be set).
430 */
431 uint8_t *pbMem = NULL;
432 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(pVCpu, GCPtrMem);
433 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
434 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
435 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
436 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
437 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
438 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
439 {
440# ifdef IEM_WITH_TLB_STATISTICS
441 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
442# endif
443
444 /* If the page is either supervisor only or non-writable, we need to do
445 more careful access checks. */
446 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
447 {
448 /* Write to read only memory? */
449 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
450 && (fAccess & IEM_ACCESS_TYPE_WRITE)
451 && ( ( IEM_GET_CPL(pVCpu) == 3
452 && !(fAccess & IEM_ACCESS_WHAT_SYS))
453 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
454 {
455 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
456 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
457 }
458
459 /* Kernel memory accessed by userland? */
460 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
461 && IEM_GET_CPL(pVCpu) == 3
462 && !(fAccess & IEM_ACCESS_WHAT_SYS))
463 {
464 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
465 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
466 }
467 }
468
469 /* Look up the physical page info if necessary. */
470 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
471# ifdef IN_RING3
472 pbMem = pTlbe->pbMappingR3;
473# else
474 pbMem = NULL;
475# endif
476 else
477 {
478 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
479 { /* likely */ }
480 else
481 iemTlbInvalidateAllPhysicalSlow(pVCpu);
482 pTlbe->pbMappingR3 = NULL;
483 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
484 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
485 &pbMem, &pTlbe->fFlagsAndPhysRev);
486 AssertRCReturn(rc, rc);
487# ifdef IN_RING3
488 pTlbe->pbMappingR3 = pbMem;
489# endif
490 }
491 }
492 else
493 {
494 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
495
496 /* This page table walking will set A and D bits as required by the access while performing the walk.
497 ASSUMES these are set when the address is translated rather than on commit... */
498 /** @todo testcase: check when A and D bits are actually set by the CPU. */
499 PGMPTWALKFAST WalkFast;
500 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
501 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
502 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
503 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
504 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
505 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
506 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
507 fQPage |= PGMQPAGE_F_USER_MODE;
508 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
509 if (RT_SUCCESS(rc))
510 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
511 else
512 {
513 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
514# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
515 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
516 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
517# endif
518 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
519 }
520
521 uint32_t fDataBps;
522 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
523 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
524 {
525 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
526 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
527 {
528 pTlbe--;
529 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
530 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
531 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
532# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
533 else
534 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
535# endif
536 }
537 else
538 {
539 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
540 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
541 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
542 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
543# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
544 else
545 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
546# endif
547 }
548 }
549 else
550 {
551 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
552 to the page with the data access breakpoint armed on it to pass thru here. */
553 if (fDataBps > 1)
554 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
555 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
556 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
557 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
558 pTlbe->uTag = uTagNoRev;
559 }
560 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
561 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
562 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
563 pTlbe->GCPhys = GCPhysPg;
564 pTlbe->pbMappingR3 = NULL;
565 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
566 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
567 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
568 || !(fAccess & IEM_ACCESS_TYPE_WRITE)
569 || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);
570 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
571 || IEM_GET_CPL(pVCpu) != 3
572 || (fAccess & IEM_ACCESS_WHAT_SYS));
573
574 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
575 {
576 if (!IEMTLBE_IS_GLOBAL(pTlbe))
577 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
578 else
579 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
580 }
581
582 /* Resolve the physical address. */
583 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
584 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
585 &pbMem, &pTlbe->fFlagsAndPhysRev);
586 AssertRCReturn(rc, rc);
587# ifdef IN_RING3
588 pTlbe->pbMappingR3 = pbMem;
589# endif
590 }
591
592 /*
593 * Check the physical page level access and mapping.
594 */
595 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
596 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
597 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
598 { /* probably likely */ }
599 else
600 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
601 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
602 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
603 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
604 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
605 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
606
607 if (pbMem)
608 {
609 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
610 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
611 fAccess |= IEM_ACCESS_NOT_LOCKED;
612 }
613 else
614 {
615 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
616 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
617 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
618 if (rcStrict != VINF_SUCCESS)
619 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
620 }
621
622 void * const pvMem = pbMem;
623
624 if (fAccess & IEM_ACCESS_TYPE_WRITE)
625 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
626 if (fAccess & IEM_ACCESS_TYPE_READ)
627 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
628
629#else /* !IEM_WITH_DATA_TLB */
630
631 RTGCPHYS GCPhysFirst;
632 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
633 if (rcStrict != VINF_SUCCESS)
634 return rcStrict;
635
636 if (fAccess & IEM_ACCESS_TYPE_WRITE)
637 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
638 if (fAccess & IEM_ACCESS_TYPE_READ)
639 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
640
641 void *pvMem;
642 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
643 if (rcStrict != VINF_SUCCESS)
644 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
645
646#endif /* !IEM_WITH_DATA_TLB */
647
648 /*
649 * Fill in the mapping table entry.
650 */
651 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
652 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
653 pVCpu->iem.s.iNextMapping = iMemMap + 1;
654 pVCpu->iem.s.cActiveMappings += 1;
655
656 *ppvMem = pvMem;
657 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
658 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
659 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
660
661 return VINF_SUCCESS;
662}
663
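/*
 * Illustrative sketch only: the usual map / modify / commit-and-unmap pattern
 * built on iemMemMap.  The uint32_t element size, u32Value and the other local
 * names are assumptions for illustration; the workers generated from
 * IEMAllMemRWTmpl-x86.cpp.h follow the same shape.
 *
 * @code
 *      uint8_t   bUnmapInfo;
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, &bUnmapInfo, sizeof(*pu32Dst),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;                                 // write through the mapping
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);  // commits bounce buffers if any were used
 *      }
 *      return rcStrict;
 * @endcode
 */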
664
665/**
666 * Maps the specified guest memory for the given kind of access, longjmp on
667 * error.
668 *
669 * This may be using bounce buffering of the memory if it's crossing a page
670 * boundary or if there is an access handler installed for any of it. Because
671 * of lock prefix guarantees, we're in for some extra clutter when this
672 * happens.
673 *
674 * This may raise a \#GP, \#SS, \#PF or \#AC.
675 *
676 * @returns Pointer to the mapped memory.
677 *
678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
679 * @param pbUnmapInfo Where to return unmap info to be passed to
680 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
681 * iemMemCommitAndUnmapWoSafeJmp,
682 * iemMemCommitAndUnmapRoSafeJmp,
683 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
684 * when done.
685 * @param cbMem The number of bytes to map. This is usually 1,
686 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
687 * string operations it can be up to a page.
688 * @param iSegReg The index of the segment register to use for
689 * this access. The base and limits are checked.
690 * Use UINT8_MAX to indicate that no segmentation
691 * is required (for IDT, GDT and LDT accesses).
692 * @param GCPtrMem The address of the guest memory.
693 * @param fAccess How the memory is being accessed. The
694 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
695 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
696 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
697 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
698 * set.
699 * @param uAlignCtl Alignment control:
700 * - Bits 15:0 is the alignment mask.
701 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
702 * IEM_MEMMAP_F_ALIGN_SSE, and
703 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
704 * Pass zero to skip alignment.
705 * @tparam a_fSafeCall Whether this is a call from a "safe" fallback function in
706 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
707 * needs counting as such in the statistics.
708 */
709template<bool a_fSafeCall = false>
710static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
711 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
712{
713 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
714
715 /*
716 * Check the input, check segment access and adjust address
717 * with segment base.
718 */
719 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
720 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
721 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
722
723 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
724 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
725 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
726
727 /*
728 * Alignment check.
729 */
730 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
731 { /* likelyish */ }
732 else
733 {
734 /* Misaligned access. */
735 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
736 {
737 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
738 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
739 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
740 {
741 AssertCompile(X86_CR0_AM == X86_EFL_AC);
742
743 if (iemMemAreAlignmentChecksEnabled(pVCpu))
744 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
745 }
746 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
747 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
748 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
749 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
750 * that's what FXSAVE does on a 10980xe. */
751 && iemMemAreAlignmentChecksEnabled(pVCpu))
752 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
753 else
754 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
755 }
756
757#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
758 /* If the access is atomic there are host platform alignment restrictions
759 we need to conform with. */
760 if ( !(fAccess & IEM_ACCESS_ATOMIC)
761# if defined(RT_ARCH_AMD64)
762 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
763# elif defined(RT_ARCH_ARM64)
764 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
765# else
766# error port me
767# endif
768 )
769 { /* okay */ }
770 else
771 {
772 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
773 pVCpu->iem.s.cMisalignedAtomics += 1;
774 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
775 }
776#endif
777 }
778
779 /*
780 * Figure out which mapping entry to use.
781 */
782 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
783 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
784 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
785 {
786 iMemMap = iemMemMapFindFree(pVCpu);
787 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
788 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
789 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
790 pVCpu->iem.s.aMemMappings[2].fAccess),
791 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
792 }
793
794 /*
795 * Crossing a page boundary?
796 */
797 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
798 { /* No (likely). */ }
799 else
800 {
801 void *pvMem;
802 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
803 if (rcStrict == VINF_SUCCESS)
804 return pvMem;
805 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
806 }
807
808#ifdef IEM_WITH_DATA_TLB
809 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
810
811 /*
812 * Get the TLB entry for this page checking that it has the A & D bits
813 * set as per fAccess flags.
814 */
815 /** @todo make the caller pass these in with fAccess. */
816 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
817 ? IEMTLBE_F_PT_NO_USER : 0;
818 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
819 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
820 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
821 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
822 ? IEMTLBE_F_PT_NO_WRITE : 0)
823 : 0;
824 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
825 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(pVCpu, GCPtrMem);
826 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
827 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
828 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
829 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
830 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
831 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
832 {
833# ifdef IEM_WITH_TLB_STATISTICS
834 if (a_fSafeCall)
835 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
836 else
837 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
838# endif
839 }
840 else
841 {
842 if (a_fSafeCall)
843 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
844 else
845 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
846
847 /* This page table walking will set A and D bits as required by the
848 access while performing the walk.
849 ASSUMES these are set when the address is translated rather than on commit... */
850 /** @todo testcase: check when A and D bits are actually set by the CPU. */
851 PGMPTWALKFAST WalkFast;
852 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
853 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
854 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
855 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
856 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
857 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
858 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
859 fQPage |= PGMQPAGE_F_USER_MODE;
860 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
861 if (RT_SUCCESS(rc))
862 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
863 else
864 {
865 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
866# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
867 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
868 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
869# endif
870 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
871 }
872
873 uint32_t fDataBps;
874 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
875 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
876 {
877 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
878 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
879 {
880 pTlbe--;
881 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
882 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
883 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
884# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
885 else
886 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
887# endif
888 }
889 else
890 {
891 if (a_fSafeCall)
892 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
893 else
894 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
895 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
896 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
897 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
898# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
899 else
900 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
901# endif
902 }
903 }
904 else
905 {
906 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
907 to the page with the data access breakpoint armed on it to pass thru here. */
908 if (fDataBps > 1)
909 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
910 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
911 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
912 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
913 pTlbe->uTag = uTagNoRev;
914 }
915 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
916 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
917 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
918 pTlbe->GCPhys = GCPhysPg;
919 pTlbe->pbMappingR3 = NULL;
920 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
921 Assert( !(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
922 || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);
923 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
924
925 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
926 {
927 if (!IEMTLBE_IS_GLOBAL(pTlbe))
928 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
929 else
930 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
931 }
932
933 /* Resolve the physical address. */
934 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
935 uint8_t *pbMemFullLoad = NULL;
936 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
937 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
938 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
939# ifdef IN_RING3
940 pTlbe->pbMappingR3 = pbMemFullLoad;
941# endif
942 }
943
944 /*
945 * Check the flags and physical revision.
946 * Note! This will revalidate the uTlbPhysRev after a full load. This is
947 * just to keep the code structure simple (i.e. avoid gotos or similar).
948 */
949 uint8_t *pbMem;
950 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
951 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
952# ifdef IN_RING3
953 pbMem = pTlbe->pbMappingR3;
954# else
955 pbMem = NULL;
956# endif
957 else
958 {
959 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
960
961 /*
962 * Okay, something isn't quite right or needs refreshing.
963 */
964 /* Write to read only memory? */
965 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
966 {
967 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
968# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
969/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
970 * to trigger an \#PF or a VM nested paging exit here yet! */
971 if (Walk.fFailed & PGM_WALKFAIL_EPT)
972 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
973# endif
974 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
975 }
976
977 /* Kernel memory accessed by userland? */
978 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
979 {
980 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
981# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
982/** @todo TLB: See above. */
983 if (Walk.fFailed & PGM_WALKFAIL_EPT)
984 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
985# endif
986 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
987 }
988
989 /*
990 * Check if the physical page info needs updating.
991 */
992 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
993# ifdef IN_RING3
994 pbMem = pTlbe->pbMappingR3;
995# else
996 pbMem = NULL;
997# endif
998 else
999 {
1000 pTlbe->pbMappingR3 = NULL;
1001 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1002 pbMem = NULL;
1003 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
1004 &pbMem, &pTlbe->fFlagsAndPhysRev);
1005 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1006# ifdef IN_RING3
1007 pTlbe->pbMappingR3 = pbMem;
1008# endif
1009 }
1010
1011 /*
1012 * Check the physical page level access and mapping.
1013 */
1014 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
1015 { /* probably likely */ }
1016 else
1017 {
1018 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
1019 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
1020 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
1021 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
1022 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
1023 if (rcStrict == VINF_SUCCESS)
1024 return pbMem;
1025 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1026 }
1027 }
1028 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
1029
1030 if (pbMem)
1031 {
1032 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
1033 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
1034 fAccess |= IEM_ACCESS_NOT_LOCKED;
1035 }
1036 else
1037 {
1038 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
1039 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
1040 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
1041 if (rcStrict == VINF_SUCCESS)
1042 {
1043 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
1044 return pbMem;
1045 }
1046 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1047 }
1048
1049 void * const pvMem = pbMem;
1050
1051 if (fAccess & IEM_ACCESS_TYPE_WRITE)
1052 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
1053 if (fAccess & IEM_ACCESS_TYPE_READ)
1054 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
1055
1056#else /* !IEM_WITH_DATA_TLB */
1057
1058
1059 RTGCPHYS GCPhysFirst;
1060 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
1061 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
1062 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1063
1064 if (fAccess & IEM_ACCESS_TYPE_WRITE)
1065 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
1066 if (fAccess & IEM_ACCESS_TYPE_READ)
1067 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
1068
1069 void *pvMem;
1070 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
1071 if (rcStrict == VINF_SUCCESS)
1072 { /* likely */ }
1073 else
1074 {
1075 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
1076 if (rcStrict == VINF_SUCCESS)
1077 return pvMem;
1078 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1079 }
1080
1081#endif /* !IEM_WITH_DATA_TLB */
1082
1083 /*
1084 * Fill in the mapping table entry.
1085 */
1086 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
1087 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
1088 pVCpu->iem.s.iNextMapping = iMemMap + 1;
1089 pVCpu->iem.s.cActiveMappings++;
1090
1091 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
1092 return pvMem;
1093}
1094
1095
1096/** @see iemMemMapJmp */
1097static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
1098 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
1099{
1100 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
1101}
1102
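/*
 * Illustrative sketch only: the longjmp flavour is used the same way, but without
 * status-code plumbing since errors unwind via IEM_DO_LONGJMP (compare
 * iemMemStoreDataU128AlignedSseJmp further down).  The local names and the
 * uint32_t element size are placeholders.
 *
 * @code
 *      uint8_t   bUnmapInfo;
 *      uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu32Dst), iSegReg,
 *                                                   GCPtrMem, IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
 *      *pu32Dst = u32Value;
 *      iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 * @endcode
 */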
1103
1104
1105/*
1106 * Instantiate R/W templates.
1107 */
1108#define TMPL_MEM_WITH_STACK
1109
1110#define TMPL_MEM_TYPE uint8_t
1111#define TMPL_MEM_FN_SUFF U8
1112#define TMPL_MEM_FMT_TYPE "%#04x"
1113#define TMPL_MEM_FMT_DESC "byte"
1114#include "IEMAllMemRWTmpl-x86.cpp.h"
1115
1116#define TMPL_MEM_TYPE uint16_t
1117#define TMPL_MEM_FN_SUFF U16
1118#define TMPL_MEM_FMT_TYPE "%#06x"
1119#define TMPL_MEM_FMT_DESC "word"
1120#include "IEMAllMemRWTmpl-x86.cpp.h"
1121
1122#define TMPL_WITH_PUSH_SREG
1123#define TMPL_MEM_TYPE uint32_t
1124#define TMPL_MEM_FN_SUFF U32
1125#define TMPL_MEM_FMT_TYPE "%#010x"
1126#define TMPL_MEM_FMT_DESC "dword"
1127#include "IEMAllMemRWTmpl-x86.cpp.h"
1128#undef TMPL_WITH_PUSH_SREG
1129
1130#define TMPL_MEM_TYPE uint64_t
1131#define TMPL_MEM_FN_SUFF U64
1132#define TMPL_MEM_FMT_TYPE "%#018RX64"
1133#define TMPL_MEM_FMT_DESC "qword"
1134#include "IEMAllMemRWTmpl-x86.cpp.h"
1135
1136#undef TMPL_MEM_WITH_STACK
1137
1138#define TMPL_MEM_TYPE uint32_t
1139#define TMPL_MEM_TYPE_ALIGN 0
1140#define TMPL_MEM_FN_SUFF U32NoAc
1141#define TMPL_MEM_FMT_TYPE "%#010x"
1142#define TMPL_MEM_FMT_DESC "dword"
1143#include "IEMAllMemRWTmpl-x86.cpp.h"
1144#undef TMPL_WITH_PUSH_SREG
1145
1146#define TMPL_MEM_TYPE uint64_t
1147#define TMPL_MEM_TYPE_ALIGN 0
1148#define TMPL_MEM_FN_SUFF U64NoAc
1149#define TMPL_MEM_FMT_TYPE "%#018RX64"
1150#define TMPL_MEM_FMT_DESC "qword"
1151#include "IEMAllMemRWTmpl-x86.cpp.h"
1152
1153#define TMPL_MEM_TYPE uint64_t
1154#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
1155#define TMPL_MEM_FN_SUFF U64AlignedU128
1156#define TMPL_MEM_FMT_TYPE "%#018RX64"
1157#define TMPL_MEM_FMT_DESC "qword"
1158#include "IEMAllMemRWTmpl-x86.cpp.h"
1159
1160/* See IEMAllMemRWTmplInline.cpp.h */
1161#define TMPL_MEM_BY_REF
1162
1163#define TMPL_MEM_TYPE RTFLOAT80U
1164#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
1165#define TMPL_MEM_FN_SUFF R80
1166#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
1167#define TMPL_MEM_FMT_DESC "tword"
1168#include "IEMAllMemRWTmpl-x86.cpp.h"
1169
1170#define TMPL_MEM_TYPE RTPBCD80U
1171#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
1172#define TMPL_MEM_FN_SUFF D80
1173#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
1174#define TMPL_MEM_FMT_DESC "tword"
1175#include "IEMAllMemRWTmpl-x86.cpp.h"
1176
1177#define TMPL_MEM_TYPE RTUINT128U
1178#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
1179#define TMPL_MEM_FN_SUFF U128
1180#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
1181#define TMPL_MEM_FMT_DESC "dqword"
1182#include "IEMAllMemRWTmpl-x86.cpp.h"
1183
1184#define TMPL_MEM_TYPE RTUINT128U
1185#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
1186#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
1187#define TMPL_MEM_FN_SUFF U128AlignedSse
1188#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
1189#define TMPL_MEM_FMT_DESC "dqword"
1190#include "IEMAllMemRWTmpl-x86.cpp.h"
1191
1192#define TMPL_MEM_TYPE RTUINT128U
1193#define TMPL_MEM_TYPE_ALIGN 0
1194#define TMPL_MEM_FN_SUFF U128NoAc
1195#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
1196#define TMPL_MEM_FMT_DESC "dqword"
1197#include "IEMAllMemRWTmpl-x86.cpp.h"
1198
1199#define TMPL_MEM_TYPE RTUINT256U
1200#define TMPL_MEM_TYPE_ALIGN 0
1201#define TMPL_MEM_FN_SUFF U256NoAc
1202#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
1203#define TMPL_MEM_FMT_DESC "qqword"
1204#include "IEMAllMemRWTmpl-x86.cpp.h"
1205
1206#define TMPL_MEM_TYPE RTUINT256U
1207#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
1208#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
1209#define TMPL_MEM_FN_SUFF U256AlignedAvx
1210#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
1211#define TMPL_MEM_FMT_DESC "qqword"
1212#include "IEMAllMemRWTmpl-x86.cpp.h"
1213
1214
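/*
 * Illustrative note: each TMPL_MEM_* block above expands IEMAllMemRWTmpl-x86.cpp.h
 * into fetch/store workers for that type (and, where TMPL_MEM_WITH_STACK is defined,
 * presumably stack-oriented helpers as the name suggests).  For example, the U32
 * block yields helpers along the lines of iemMemFetchDataU32 and iemMemStoreDataU32,
 * which are used further down in this file.  A sketch of what a caller sees:
 *
 * @code
 *      uint32_t u32Val;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iSegReg, GCPtrMem);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 4, u32Val + 1);
 * @endcode
 */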
1215/**
1216 * Fetches a data dword and zero extends it to a qword.
1217 *
1218 * @returns Strict VBox status code.
1219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1220 * @param pu64Dst Where to return the qword.
1221 * @param iSegReg The index of the segment register to use for
1222 * this access. The base and limits are checked.
1223 * @param GCPtrMem The address of the guest memory.
1224 */
1225VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
1226{
1227 /* The lazy approach for now... */
1228 uint8_t bUnmapInfo;
1229 uint32_t const *pu32Src;
1230 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
1231 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
1232 if (rc == VINF_SUCCESS)
1233 {
1234 *pu64Dst = *pu32Src;
1235 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1236 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
1237 }
1238 return rc;
1239}
1240
1241
1242#ifdef SOME_UNUSED_FUNCTION
1243/**
1244 * Fetches a data dword and sign extends it to a qword.
1245 *
1246 * @returns Strict VBox status code.
1247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1248 * @param pu64Dst Where to return the sign extended value.
1249 * @param iSegReg The index of the segment register to use for
1250 * this access. The base and limits are checked.
1251 * @param GCPtrMem The address of the guest memory.
1252 */
1253VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
1254{
1255 /* The lazy approach for now... */
1256 uint8_t bUnmapInfo;
1257 int32_t const *pi32Src;
1258 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
1259 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
1260 if (rc == VINF_SUCCESS)
1261 {
1262 *pu64Dst = *pi32Src;
1263 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1264 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
1265 }
1266#ifdef __GNUC__ /* warning: GCC may be a royal pain */
1267 else
1268 *pu64Dst = 0;
1269#endif
1270 return rc;
1271}
1272#endif
1273
1274
1275/**
1276 * Fetches a descriptor register (lgdt, lidt).
1277 *
1278 * @returns Strict VBox status code.
1279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1280 * @param pcbLimit Where to return the limit.
1281 * @param pGCPtrBase Where to return the base.
1282 * @param iSegReg The index of the segment register to use for
1283 * this access. The base and limits are checked.
1284 * @param GCPtrMem The address of the guest memory.
1285 * @param enmOpSize The effective operand size.
1286 */
1287VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
1288 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
1289{
1290 /*
1291 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
1292 * little special:
1293 * - The two reads are done separately.
1294 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
1295 * - We suspect the 386 to actually commit the limit before the base in
1296 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
1297 * don't try to emulate this eccentric behavior, because it's not well
1298 * enough understood and rather hard to trigger.
1299 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
1300 */
1301 VBOXSTRICTRC rcStrict;
1302 if (IEM_IS_64BIT_CODE(pVCpu))
1303 {
1304 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
1305 if (rcStrict == VINF_SUCCESS)
1306 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
1307 }
1308 else
1309 {
1310 uint32_t uTmp = 0; /* (silences Visual C++ 'maybe used uninitialized' warning) */
1311 if (enmOpSize == IEMMODE_32BIT)
1312 {
1313 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
1314 {
1315 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
1316 if (rcStrict == VINF_SUCCESS)
1317 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
1318 }
1319 else
1320 {
1321 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
1322 if (rcStrict == VINF_SUCCESS)
1323 {
1324 *pcbLimit = (uint16_t)uTmp;
1325 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
1326 }
1327 }
1328 if (rcStrict == VINF_SUCCESS)
1329 *pGCPtrBase = uTmp;
1330 }
1331 else
1332 {
1333 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
1334 if (rcStrict == VINF_SUCCESS)
1335 {
1336 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
1337 if (rcStrict == VINF_SUCCESS)
1338 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
1339 }
1340 }
1341 }
1342 return rcStrict;
1343}
1344
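/*
 * Illustrative sketch only: an LGDT/LIDT style caller fetches the limit and base in
 * one go and then loads them into the corresponding descriptor-table register.  The
 * variable names below are placeholders.
 *
 * @code
 *      uint16_t cbLimit;
 *      RTGCPTR  GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                  iSegReg, GCPtrEffSrc, enmOpSize);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // cbLimit/GCPtrBase would then be handed on to update GDTR or IDTR;
 *      // that part lives outside this file.
 * @endcode
 */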
1345
1346/**
1347 * Stores a data dqword, SSE aligned.
1348 *
1349 * @returns Strict VBox status code.
1350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1351 * @param iSegReg The index of the segment register to use for
1352 * this access. The base and limits are checked.
1353 * @param GCPtrMem The address of the guest memory.
1354 * @param u128Value The value to store.
1355 */
1356VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
1357{
1358 /* The lazy approach for now... */
1359 uint8_t bUnmapInfo;
1360 PRTUINT128U pu128Dst;
1361 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
1362 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
1363 if (rc == VINF_SUCCESS)
1364 {
1365 pu128Dst->au64[0] = u128Value.au64[0];
1366 pu128Dst->au64[1] = u128Value.au64[1];
1367 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1368 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
1369 }
1370 return rc;
1371}
1372
1373
1374/**
1375 * Stores a data dqword, SSE aligned, longjmp on error.
1376 *
1377 * @note Longjmps on error instead of returning a status code.
1378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1379 * @param iSegReg The index of the segment register to use for
1380 * this access. The base and limits are checked.
1381 * @param GCPtrMem The address of the guest memory.
1382 * @param u128Value The value to store.
1383 */
1384void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
1385 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
1386{
1387 /* The lazy approach for now... */
1388 uint8_t bUnmapInfo;
1389 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
1390 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
1391 pu128Dst->au64[0] = u128Value.au64[0];
1392 pu128Dst->au64[1] = u128Value.au64[1];
1393 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
1394 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
1395}
1396
1397
1398/**
1399 * Stores a data qqword.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1403 * @param iSegReg The index of the segment register to use for
1404 * this access. The base and limits are checked.
1405 * @param GCPtrMem The address of the guest memory.
1406 * @param pu256Value Pointer to the value to store.
1407 */
1408VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
1409{
1410 /* The lazy approach for now... */
1411 uint8_t bUnmapInfo;
1412 PRTUINT256U pu256Dst;
1413 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
1414 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
1415 if (rc == VINF_SUCCESS)
1416 {
1417 pu256Dst->au64[0] = pu256Value->au64[0];
1418 pu256Dst->au64[1] = pu256Value->au64[1];
1419 pu256Dst->au64[2] = pu256Value->au64[2];
1420 pu256Dst->au64[3] = pu256Value->au64[3];
1421 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1422 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
1423 }
1424 return rc;
1425}
1426
1427
1428/**
1429 * Stores a data qqword, longjmp on error.
1430 *
1431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1432 * @param iSegReg The index of the segment register to use for
1433 * this access. The base and limits are checked.
1434 * @param GCPtrMem The address of the guest memory.
1435 * @param pu256Value Pointer to the value to store.
1436 */
1437void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
1438{
1439 /* The lazy approach for now... */
1440 uint8_t bUnmapInfo;
1441 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
1442 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
1443 pu256Dst->au64[0] = pu256Value->au64[0];
1444 pu256Dst->au64[1] = pu256Value->au64[1];
1445 pu256Dst->au64[2] = pu256Value->au64[2];
1446 pu256Dst->au64[3] = pu256Value->au64[3];
1447 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
1448 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
1449}
1450
1451
1452/**
1453 * Stores a descriptor register (sgdt, sidt).
1454 *
1455 * @returns Strict VBox status code.
1456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1457 * @param cbLimit The limit.
1458 * @param GCPtrBase The base address.
1459 * @param iSegReg The index of the segment register to use for
1460 * this access. The base and limits are checked.
1461 * @param GCPtrMem The address of the guest memory.
1462 */
1463VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
1464{
1465 /*
1466 * The SIDT and SGDT instructions actually store the data using two
1467 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). These instructions
1468 * do not respond to opsize prefixes.
1469 */
1470 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
1471 if (rcStrict == VINF_SUCCESS)
1472 {
1473 if (IEM_IS_16BIT_CODE(pVCpu))
1474 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
1475 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
1476 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
1477 else if (IEM_IS_32BIT_CODE(pVCpu))
1478 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
1479 else
1480 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
1481 }
1482 return rcStrict;
1483}
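

/*
 * Illustrative sketch only (not compiled): roughly how an SGDT-style helper
 * could use iemMemStoreDataXdtr.  The function name and the effective-address
 * parameters are hypothetical; descriptor-table locking and instruction
 * decoding concerns are ignored.
 */
#if 0
static VBOXSTRICTRC iemSketchSgdt(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffDst)
{
    /* Make sure the GDTR is available in the guest context before reading it. */
    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
    return iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt,
                               iEffSeg, GCPtrEffDst);
}
#endif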
1484
1485
1486/**
1487 * Begin a special stack push (used by interrupts, exceptions and such).
1488 *
1489 * This will raise \#SS or \#PF if appropriate.
1490 *
1491 * @returns Strict VBox status code.
1492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1493 * @param cbMem The number of bytes to push onto the stack.
1494 * @param cbAlign The alignment mask (7, 3, 1).
1495 * @param ppvMem Where to return the pointer to the stack memory.
1496 * As with the other memory functions this could be
1497 * direct access or bounce buffered access, so
1498 * don't commit registers until the commit call
1499 * succeeds.
1500 * @param pbUnmapInfo Where to store unmap info for
1501 * iemMemStackPushCommitSpecial.
1502 * @param puNewRsp Where to return the new RSP value. This must be
1503 * passed unchanged to
1504 * iemMemStackPushCommitSpecial().
1505 */
1506VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
1507 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
1508{
1509 Assert(cbMem < UINT8_MAX);
1510 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
1511 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
1512}
1513
1514
1515/**
1516 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
1517 *
1518 * This will update the rSP.
1519 *
1520 * @returns Strict VBox status code.
1521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1522 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
1523 * @param uNewRsp The new RSP value returned by
1524 * iemMemStackPushBeginSpecial().
1525 */
1526VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
1527{
1528 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1529 if (rcStrict == VINF_SUCCESS)
1530 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1531 return rcStrict;
1532}
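

/*
 * Illustrative sketch only (not compiled): the begin/commit pairing when
 * pushing a single 32-bit value (e.g. an error code) during event delivery.
 * The helper name is hypothetical; note that nothing is committed until
 * iemMemStackPushCommitSpecial() has succeeded.
 */
#if 0
static VBOXSTRICTRC iemSketchPushErrCd32(PVMCPUCC pVCpu, uint32_t uErrCd)
{
    void        *pvMem      = NULL;
    uint8_t      bUnmapInfo = 0;
    uint64_t     uNewRsp    = 0;
    VBOXSTRICTRC rcStrict   = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint32_t), 3 /*cbAlign mask*/,
                                                          &pvMem, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *(uint32_t *)pvMem = uErrCd;    /* write via the mapping (may be bounce buffered) */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); /* commits and updates RSP */
    }
    return rcStrict;
}
#endif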
1533
1534
1535/**
1536 * Begin a special stack pop (used by iret, retf and such).
1537 *
1538 * This will raise \#SS or \#PF if appropriate.
1539 *
1540 * @returns Strict VBox status code.
1541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1542 * @param cbMem The number of bytes to pop from the stack.
1543 * @param cbAlign The alignment mask (7, 3, 1).
1544 * @param ppvMem Where to return the pointer to the stack memory.
1545 * @param pbUnmapInfo Where to store unmap info for
1546 * iemMemStackPopDoneSpecial.
1547 * @param puNewRsp Where to return the new RSP value. This must be
1548 * assigned to CPUMCTX::rsp manually some time
1549 * after iemMemStackPopDoneSpecial() has been
1550 * called.
1551 */
1552VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
1553 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
1554{
1555 Assert(cbMem < UINT8_MAX);
1556 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
1557 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
1558}
1559
1560
1561/**
1562 * Continue a special stack pop (used by iret and retf), for the purpose of
1563 * retrieving a new stack pointer.
1564 *
1565 * This will raise \#SS or \#PF if appropriate.
1566 *
1567 * @returns Strict VBox status code.
1568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1569 * @param off Offset from the top of the stack. This is zero
1570 * except in the retf case.
1571 * @param cbMem The number of bytes to pop from the stack.
1572 * @param ppvMem Where to return the pointer to the stack memory.
1573 * @param pbUnmapInfo Where to store unmap info for
1574 * iemMemStackPopDoneSpecial.
1575 * @param uCurNewRsp The current uncommitted RSP value. (No need to
1576 * return this because all use of this function is
1577 * to retrieve a new value and anything we return
1578 * here would be discarded.)
1579 */
1580VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
1581 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
1582{
1583 Assert(cbMem < UINT8_MAX);
1584
1585 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
1586 RTGCPTR GCPtrTop;
1587 if (IEM_IS_64BIT_CODE(pVCpu))
1588 GCPtrTop = uCurNewRsp;
1589 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1590 GCPtrTop = (uint32_t)uCurNewRsp;
1591 else
1592 GCPtrTop = (uint16_t)uCurNewRsp;
1593
1594 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
1595 0 /* checked in iemMemStackPopBeginSpecial */);
1596}
1597
1598
1599/**
1600 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
1601 * iemMemStackPopContinueSpecial).
1602 *
1603 * The caller will manually commit the rSP.
1604 *
1605 * @returns Strict VBox status code.
1606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1607 * @param bUnmapInfo Unmap information returned by
1608 * iemMemStackPopBeginSpecial() or
1609 * iemMemStackPopContinueSpecial().
1610 */
1611VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
1612{
1613 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1614}
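

/*
 * Illustrative sketch only (not compiled): a far-return style use of the
 * special pop sequence on a 32-bit stack, popping EIP/CS first and then, for
 * a return to an outer ring, SS:ESP located cbPop bytes further up.  All
 * validation and the actual register loading are omitted, the helper name is
 * hypothetical, and CPUMCTX::rsp is only assigned by the caller afterwards.
 */
#if 0
static VBOXSTRICTRC iemSketchRetfPop32(PVMCPUCC pVCpu, uint16_t cbPop)
{
    uint32_t const *pau32Frame = NULL;
    uint8_t         bUnmapInfo = 0;
    uint64_t        uNewRsp    = 0;

    /* EIP + CS (2 x 4 bytes). */
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8 /*cbMem*/, 3 /*cbAlign mask*/,
                                                       (void const **)&pau32Frame, &bUnmapInfo, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t const uNewEip = pau32Frame[0];
    uint16_t const uNewCs  = (uint16_t)pau32Frame[1];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* ESP + SS for the outer ring, skipping the immediate operand bytes. */
    rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, 8 /*cbMem*/,
                                             (void const **)&pau32Frame, &bUnmapInfo, uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t const uNewEsp = pau32Frame[0];
    uint16_t const uNewSs  = (uint16_t)pau32Frame[1];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);

    /* ... validate and load CS/SS, then commit the stack pointer manually ... */
    RT_NOREF(uNewEip, uNewCs, uNewEsp, uNewSs);
    return rcStrict;
}
#endif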
1615
1616
1617/**
1618 * Fetches a system table byte.
1619 *
1620 * @returns Strict VBox status code.
1621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1622 * @param pbDst Where to return the byte.
1623 * @param iSegReg The index of the segment register to use for
1624 * this access. The base and limits are checked.
1625 * @param GCPtrMem The address of the guest memory.
1626 */
1627VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
1628{
1629 /* The lazy approach for now... */
1630 uint8_t bUnmapInfo;
1631 uint8_t const *pbSrc;
1632 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
1633 if (rc == VINF_SUCCESS)
1634 {
1635 *pbDst = *pbSrc;
1636 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1637 }
1638 return rc;
1639}
1640
1641
1642/**
1643 * Fetches a system table word.
1644 *
1645 * @returns Strict VBox status code.
1646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1647 * @param pu16Dst Where to return the word.
1648 * @param iSegReg The index of the segment register to use for
1649 * this access. The base and limits are checked.
1650 * @param GCPtrMem The address of the guest memory.
1651 */
1652VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
1653{
1654 /* The lazy approach for now... */
1655 uint8_t bUnmapInfo;
1656 uint16_t const *pu16Src;
1657 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
1658 if (rc == VINF_SUCCESS)
1659 {
1660 *pu16Dst = *pu16Src;
1661 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1662 }
1663 return rc;
1664}
1665
1666
1667/**
1668 * Fetches a system table dword.
1669 *
1670 * @returns Strict VBox status code.
1671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1672 * @param pu32Dst Where to return the dword.
1673 * @param iSegReg The index of the segment register to use for
1674 * this access. The base and limits are checked.
1675 * @param GCPtrMem The address of the guest memory.
1676 */
1677VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
1678{
1679 /* The lazy approach for now... */
1680 uint8_t bUnmapInfo;
1681 uint32_t const *pu32Src;
1682 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
1683 if (rc == VINF_SUCCESS)
1684 {
1685 *pu32Dst = *pu32Src;
1686 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1687 }
1688 return rc;
1689}
1690
1691
1692/**
1693 * Fetches a system table qword.
1694 *
1695 * @returns Strict VBox status code.
1696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1697 * @param pu64Dst Where to return the qword.
1698 * @param iSegReg The index of the segment register to use for
1699 * this access. The base and limits are checked.
1700 * @param GCPtrMem The address of the guest memory.
1701 */
1702VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
1703{
1704 /* The lazy approach for now... */
1705 uint8_t bUnmapInfo;
1706 uint64_t const *pu64Src;
1707 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
1708 if (rc == VINF_SUCCESS)
1709 {
1710 *pu64Dst = *pu64Src;
1711 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1712 }
1713 return rc;
1714}
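

/*
 * Illustrative sketch only (not compiled): the system-table fetchers above
 * are typically used for flat structures addressed via descriptor-table or
 * task-register bases.  This hypothetical helper reads the ring-0 stack
 * pointer and selector from a 32-bit TSS; it assumes the caller has already
 * imported CPUMCTX_EXTRN_TR and validated the task register.
 */
#if 0
static VBOXSTRICTRC iemSketchFetchTss32Ring0Stack(PVMCPUCC pVCpu, uint32_t *puEsp0, uint16_t *puSs0)
{
    uint64_t const GCPtrTss = pVCpu->cpum.GstCtx.tr.u64Base;
    VBOXSTRICTRC rcStrict = iemMemFetchSysU32(pVCpu, puEsp0, UINT8_MAX /*iSegReg*/,
                                              GCPtrTss + RT_UOFFSETOF(X86TSS32, esp0));
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemFetchSysU16(pVCpu, puSs0, UINT8_MAX /*iSegReg*/,
                                     GCPtrTss + RT_UOFFSETOF(X86TSS32, ss0));
    return rcStrict;
}
#endif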
1715
1716
1717/**
1718 * Fetches a descriptor table entry with caller specified error code.
1719 *
1720 * @returns Strict VBox status code.
1721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1722 * @param pDesc Where to return the descriptor table entry.
1723 * @param uSel The selector which table entry to fetch.
1724 * @param uXcpt The exception to raise on table lookup error.
1725 * @param uErrorCode The error code associated with the exception.
1726 */
1727VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
1728 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
1729{
1730 AssertPtr(pDesc);
1731 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1732
1733 /** @todo did the 286 require all 8 bytes to be accessible? */
1734 /*
1735 * Get the selector table base and check bounds.
1736 */
1737 RTGCPTR GCPtrBase;
1738 if (uSel & X86_SEL_LDT)
1739 {
1740 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
1741 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
1742 {
1743 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
1744 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
1745 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
1746 uErrorCode, 0);
1747 }
1748
1749 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
1750 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
1751 }
1752 else
1753 {
1754 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
1755 {
1756 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
1757 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
1758 uErrorCode, 0);
1759 }
1760 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
1761 }
1762
1763 /*
1764 * Read the legacy descriptor and maybe the long mode extensions if
1765 * required.
1766 */
1767 VBOXSTRICTRC rcStrict;
1768 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
1769 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
1770 else
1771 {
1772 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
1773 if (rcStrict == VINF_SUCCESS)
1774 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
1775 if (rcStrict == VINF_SUCCESS)
1776 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
1777 if (rcStrict == VINF_SUCCESS)
1778 pDesc->Legacy.au16[3] = 0;
1779 else
1780 return rcStrict;
1781 }
1782
1783 if (rcStrict == VINF_SUCCESS)
1784 {
1785 if ( !IEM_IS_LONG_MODE(pVCpu)
1786 || pDesc->Legacy.Gen.u1DescType)
1787 pDesc->Long.au64[1] = 0;
1788 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
1789 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
1790 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
1791 else
1792 {
1793 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
1794 /** @todo is this the right exception? */
1795 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
1796 }
1797 }
1798 return rcStrict;
1799}
1800
1801
1802/**
1803 * Fetches a descriptor table entry.
1804 *
1805 * @returns Strict VBox status code.
1806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1807 * @param pDesc Where to return the descriptor table entry.
1808 * @param uSel The selector which table entry to fetch.
1809 * @param uXcpt The exception to raise on table lookup error.
1810 */
1811VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
1812{
1813 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
1814}
1815
1816
1817/**
1818 * Marks the selector descriptor as accessed (only non-system descriptors).
1819 *
1820 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
1821 * will therefore skip the limit checks.
1822 *
1823 * @returns Strict VBox status code.
1824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1825 * @param uSel The selector.
1826 */
1827VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
1828{
1829 /*
1830 * Get the selector table base and calculate the entry address.
1831 */
1832 RTGCPTR GCPtr = uSel & X86_SEL_LDT
1833 ? pVCpu->cpum.GstCtx.ldtr.u64Base
1834 : pVCpu->cpum.GstCtx.gdtr.pGdt;
1835 GCPtr += uSel & X86_SEL_MASK;
1836
1837 /*
1838 * ASMAtomicBitSet will assert if the address is misaligned, so do some
1839 * ugly stuff to avoid this. This will make sure it's an atomic access
1840 * as well as more or less remove any question about 8-bit or 32-bit accesses.
1841 */
1842 VBOXSTRICTRC rcStrict;
1843 uint8_t bUnmapInfo;
1844 uint32_t volatile *pu32;
1845 if ((GCPtr & 3) == 0)
1846 {
1847 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
1848 GCPtr += 2 + 2;
1849 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
1850 if (rcStrict != VINF_SUCCESS)
1851 return rcStrict;
1852 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
1853 }
1854 else
1855 {
1856 /* The misaligned GDT/LDT case, map the whole thing. */
1857 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
1858 if (rcStrict != VINF_SUCCESS)
1859 return rcStrict;
1860 switch ((uintptr_t)pu32 & 3)
1861 {
1862 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
1863 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
1864 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
1865 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
1866 }
1867 }
1868
1869 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1870}
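

/*
 * Illustrative sketch only (not compiled): the typical pairing of the
 * descriptor fetch and the accessed-bit marking when loading a segment
 * register.  The helper name is hypothetical and all descriptor validation
 * (type, DPL, present) is omitted.
 */
#if 0
static VBOXSTRICTRC iemSketchLoadSegDesc(PVMCPUCC pVCpu, uint16_t uSel)
{
    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* ... type, DPL and present checks would go here ... */

    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;    /* keep the local copy in sync */
    }

    /* ... commit the hidden segment register fields from Desc ... */
    return VINF_SUCCESS;
}
#endif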
1871
1872/** @} */
1873