VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllMemRWTmplInline-x86.cpp.h@ 108434

Last change on this file since 108434 was 108278, checked in by vboxsync, 3 months ago

VMM/IEM: Removed the #ifndef IEM_WITH_SETJMP code. We've had IEM_WITH_SETJMP defined unconditionally since 7.0 and the code probably doesn't even compile w/o it, so best remove the unused code. jiraref:VBP-1531

1/* $Id: IEMAllMemRWTmplInline-x86.cpp.h 108278 2025-02-18 15:46:53Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined R/W Memory Functions Template, x86 target.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/* Check template parameters. */
30#ifndef TMPL_MEM_TYPE
31# error "TMPL_MEM_TYPE is undefined"
32#endif
33#ifndef TMPL_MEM_TYPE_SIZE
34# error "TMPL_MEM_TYPE_SIZE is undefined"
35#endif
36#ifndef TMPL_MEM_TYPE_ALIGN
37# error "TMPL_MEM_TYPE_ALIGN is undefined"
38#endif
39#ifndef TMPL_MEM_FN_SUFF
40# error "TMPL_MEM_FN_SUFF is undefined"
41#endif
42#ifndef TMPL_MEM_FMT_TYPE
43# error "TMPL_MEM_FMT_TYPE is undefined"
44#endif
45#ifndef TMPL_MEM_FMT_DESC
46# error "TMPL_MEM_FMT_DESC is undefined"
47#endif
48
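/* A hypothetical instantiation (values illustrative only; the real includer may
   use different ones) defines the parameters above before pulling in this file:

       #define TMPL_MEM_TYPE       uint16_t
       #define TMPL_MEM_TYPE_SIZE  2
       #define TMPL_MEM_TYPE_ALIGN 1
       #define TMPL_MEM_FN_SUFF    U16
       #define TMPL_MEM_FMT_TYPE   "%#06x"
       #define TMPL_MEM_FMT_DESC   "word"
       #include "IEMAllMemRWTmplInline-x86.cpp.h"
*/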
49
50/** Helper for checking if @a a_GCPtr is acceptably aligned and fully within
51 * the page for a TMPL_MEM_TYPE. */
52#if TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
53# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) \
54 && ((a_GCPtr) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)) \
55 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
56#else
57# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */ \
58 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
59#endif
60
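/* Example (illustrative): for a 4-byte item with TMPL_MEM_TYPE_ALIGN = 3 the
   check reduces to the alignment test alone -- an aligned pointer such as
   0x2000 can never straddle a page -- whereas a type whose alignment mask is
   smaller than its size (first variant) must additionally verify that the item
   ends within the page.  Anything failing these tests is only accepted when
   TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK says the unaligned access is fine. */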
61/**
62 * Values have to be passed by reference if larger than uint64_t.
63 *
64 * This is a restriction of the Visual C++ AMD64 calling convention; the gcc
65 * AMD64 and ARM64 ABIs can easily pass and return values of up to 128 bits via
66 * registers. For larger values like RTUINT256U, Visual C++ on AMD64 and ARM64
67 * passes them by hidden reference, whereas the gcc AMD64 ABI will use the stack.
68 *
69 * So, to avoid passing anything on the stack, we just explicitly pass values by
70 * reference (pointer) if they are larger than uint64_t. This ASSUMES 64-bit
71 * host.
72 */
73#if TMPL_MEM_TYPE_SIZE > 8
74# define TMPL_MEM_BY_REF
75#else
76# undef TMPL_MEM_BY_REF
77#endif
78
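/* Consequently, a hypothetical 16-byte instantiation (TMPL_MEM_TYPE = RTUINT128U,
   TMPL_MEM_FN_SUFF = U128) gets fetch/store signatures taking a pointer, roughly
       void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, RTUINT128U *pValue, uint8_t iSegReg, RTGCPTR GCPtrMem);
   while an 8-byte one (uint64_t / U64) keeps the by-value form
       uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem);
   (decorations like DECL_INLINE_THROW and IEM_NOEXCEPT_MAY_LONGJMP omitted). */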
79
80/*********************************************************************************************************************************
81* Fetches *
82*********************************************************************************************************************************/
83
84/**
85 * Inlined fetch function that longjumps on error.
86 *
87 * @note The @a iSegReg is not allowed to be UINT8_MAX!
88 */
89#ifdef TMPL_MEM_BY_REF
90DECL_INLINE_THROW(void)
91RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
92#else
93DECL_INLINE_THROW(TMPL_MEM_TYPE)
94RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
95#endif
96{
97 AssertCompile(sizeof(TMPL_MEM_TYPE) == TMPL_MEM_TYPE_SIZE);
98#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
99 /*
100 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
101 */
102 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
103# if TMPL_MEM_TYPE_SIZE > 1
104 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
105# endif
106 {
107 /*
108 * TLB lookup.
109 */
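/* The TLB entries come in even/odd pairs: the even entry is tagged with the
   non-global revision and the odd entry (pTlbe + 1) with the global one, so
   one cheap tag compare per entry covers both kinds of pages. */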
110 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
111 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
112 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
113 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
114 {
115 /*
116 * Check TLB page table level access flags.
117 */
118 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
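/* Only CPL 3 sets the bit: (3 + 1) & 4 == IEMTLBE_F_PT_NO_USER, while CPL 0..2
   yields 0, so user-mode code faults on supervisor-only pages and kernel-mode
   code ignores the flag.  The AssertCompile above guards this trick. */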
119 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
120 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
121 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
122 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
123 {
124 /*
125 * Fetch and return the data.
126 */
127# ifdef IEM_WITH_TLB_STATISTICS
128 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
129# endif
130 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
131 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
132# ifdef TMPL_MEM_BY_REF
133 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
134 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
135 iSegReg, GCPtrMem, GCPtrEff, pValue));
136 return;
137# else
138 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
139 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE "\n",
140 iSegReg, GCPtrMem, GCPtrEff, uRet));
141 return uRet;
142# endif
143 }
144 }
145 }
146
147 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
148 outdated page pointer, or other troubles. (This will do a TLB load.) */
149 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
150#endif
151#ifdef TMPL_MEM_BY_REF
152 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, iSegReg, GCPtrMem);
153#else
154 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem);
155#endif
156}
157
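/* With the hypothetical U16 parameter set sketched near the top of the file,
   RT_CONCAT3 would resolve the function above so that callers simply write
       uint16_t uVal = iemMemFetchDataU16Jmp(pVCpu, X86_SREG_DS, GCPtrMem);
   i.e. an ordinary typed fetch with none of the template machinery visible. */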
158
159/**
160 * Inlined flat addressing fetch function that longjumps on error.
161 */
162#ifdef TMPL_MEM_BY_REF
163DECL_INLINE_THROW(void)
164RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
165#else
166DECL_INLINE_THROW(TMPL_MEM_TYPE)
167RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
168#endif
169{
170 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
171 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
172 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
173#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
174 /*
175 * Check that it doesn't cross a page boundary.
176 */
177# if TMPL_MEM_TYPE_SIZE > 1
178 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
179# endif
180 {
181 /*
182 * TLB lookup.
183 */
184 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
185 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
186 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
187 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
188 {
189 /*
190 * Check TLB page table level access flags.
191 */
192 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
193 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
194 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
195 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
196 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
197 {
198 /*
199 * Fetch and return the data.
200 */
201# ifdef IEM_WITH_TLB_STATISTICS
202 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
203# endif
204 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
205 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
206# ifdef TMPL_MEM_BY_REF
207 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
208 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
209 GCPtrMem, pValue));
210 return;
211# else
212 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
213 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
214 return uRet;
215# endif
216 }
217 }
218 }
219
220 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
221 outdated page pointer, or other troubles. (This will do a TLB load.) */
222 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
223#endif
224#ifdef TMPL_MEM_BY_REF
225 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, UINT8_MAX, GCPtrMem);
226#else
227 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
228#endif
229}
230
231
232/*********************************************************************************************************************************
233* Stores *
234*********************************************************************************************************************************/
235#ifndef TMPL_MEM_NO_STORE
236
237/**
238 * Inlined store function that longjumps on error.
239 *
240 * @note The @a iSegReg is not allowed to be UINT8_MAX!
241 */
242DECL_INLINE_THROW(void)
243RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
244# ifdef TMPL_MEM_BY_REF
245 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
246# else
247 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
248# endif
249{
250# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
251 /*
252 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
253 */
254 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
255# if TMPL_MEM_TYPE_SIZE > 1
256 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
257# endif
258 {
259 /*
260 * TLB lookup.
261 */
262 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
263 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
264 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
265 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
266 {
267 /*
268 * Check TLB page table level access flags.
269 */
270 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
271 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
272 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
273 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
274 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
275 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
276 {
277 /*
278 * Store the value and return.
279 */
280# ifdef IEM_WITH_TLB_STATISTICS
281 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
282# endif
283 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
284 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
285# ifdef TMPL_MEM_BY_REF
286 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = *pValue;
287 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs (%04x:%RX64)\n",
288 iSegReg, GCPtrMem, GCPtrEff, pValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
289# else
290 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
291 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE " (%04x:%RX64)\n",
292 iSegReg, GCPtrMem, GCPtrEff, uValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
293# endif
294 return;
295 }
296 }
297 }
298
299 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
300 outdated page pointer, or other troubles. (This will do a TLB load.) */
301 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
302# endif
303# ifdef TMPL_MEM_BY_REF
304 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, pValue);
305# else
306 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
307# endif
308}
309
310
311/**
312 * Inlined flat addressing store function that longjumps on error.
313 */
314DECL_INLINE_THROW(void)
315RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
316# ifdef TMPL_MEM_BY_REF
317 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
318# else
319 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
320# endif
321{
322 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
323 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
324 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
325# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
326 /*
327 * Check that it doesn't cross a page boundary.
328 */
329# if TMPL_MEM_TYPE_SIZE > 1
330 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
331# endif
332 {
333 /*
334 * TLB lookup.
335 */
336 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
337 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
338 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
339 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
340 {
341 /*
342 * Check TLB page table level access flags.
343 */
344 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
345 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
346 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
347 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
348 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
349 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
350 {
351 /*
352 * Store the value and return.
353 */
354# ifdef IEM_WITH_TLB_STATISTICS
355 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
356# endif
357 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
358 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
359# ifdef TMPL_MEM_BY_REF
360 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = *pValue;
361 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
362 GCPtrMem, pValue));
363# else
364 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
365 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
366# endif
367 return;
368 }
369 }
370 }
371
372 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
373 outdated page pointer, or other troubles. (This will do a TLB load.) */
374 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
375# endif
376# ifdef TMPL_MEM_BY_REF
377 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, pValue);
378# else
379 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, uValue);
380# endif
381}
382
383#endif /* !TMPL_MEM_NO_STORE */
384
385
386/*********************************************************************************************************************************
387* Mapping / Direct Memory Access *
388*********************************************************************************************************************************/
389#ifndef TMPL_MEM_NO_MAPPING
390
391/**
392 * Inlined read-write memory mapping function that longjumps on error.
393 *
394 * Almost identical to RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtJmp).
395 */
396DECL_INLINE_THROW(TMPL_MEM_TYPE *)
397RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
398 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
399{
400# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
401 /*
402 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
403 */
404 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
405# if TMPL_MEM_TYPE_SIZE > 1
406 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
407# endif
408 {
409 /*
410 * TLB lookup.
411 */
412 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
413 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
414 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
415 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
416 {
417 /*
418 * Check TLB page table level access flags.
419 */
420 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
421 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
422 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
423 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
424 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
425 | fNoUser))
426 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
427 {
428 /*
429 * Return the address.
430 */
431# ifdef IEM_WITH_TLB_STATISTICS
432 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
433# endif
434 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
435 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
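/* A zero unmap-info byte presumably tells the unmap/commit path that this is a
   direct TLB mapping with no bounce buffer, so there is nothing to write back. */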
436 *pbUnmapInfo = 0;
437 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
438 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
439 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
440 }
441 }
442 }
443
444 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
445 outdated page pointer, or other troubles. (This will do a TLB load.) */
446 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
447# endif
448 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
449}
450
451
452/**
453 * Inlined flat read-write memory mapping function that longjumps on error.
454 *
455 * Almost identical to RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,AtJmp).
456 */
457DECL_INLINE_THROW(TMPL_MEM_TYPE *)
458RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
459 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
460{
461# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
462 /*
463 * Check that the address doesn't cross a page boundary.
464 */
465# if TMPL_MEM_TYPE_SIZE > 1
466 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
467# endif
468 {
469 /*
470 * TLB lookup.
471 */
472 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
473 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
474 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
475 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
476 {
477 /*
478 * Check TLB page table level access flags.
479 */
480 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
481 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
482 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
483 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
484 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
485 | fNoUser))
486 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
487 {
488 /*
489 * Return the address.
490 */
491# ifdef IEM_WITH_TLB_STATISTICS
492 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
493# endif
494 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
495 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
496 *pbUnmapInfo = 0;
497 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
498 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
499 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
500 }
501 }
502 }
503
504 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
505 outdated page pointer, or other troubles. (This will do a TLB load.) */
506 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
507# endif
508 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
509}
510
511# ifdef TMPL_MEM_WITH_ATOMIC_MAPPING
512
513/**
514 * Inlined atomic read-write memory mapping function that longjumps on error.
515 *
516 * Almost identical to RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp).
517 */
518DECL_INLINE_THROW(TMPL_MEM_TYPE *)
519RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
520 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
521{
522# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
523 /*
524 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
525 */
526 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
527# if TMPL_MEM_TYPE_SIZE > 1
528 if (RT_LIKELY(!(GCPtrEff & TMPL_MEM_TYPE_ALIGN))) /* strictly aligned, otherwise fall back to the helper which knows the details. */
529# endif
530 {
531 /*
532 * TLB lookup.
533 */
534 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
535 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
536 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
537 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
538 {
539 /*
540 * Check TLB page table level access flags.
541 */
542 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
543 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
544 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
545 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
546 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
547 | fNoUser))
548 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
549 {
550 /*
551 * Return the address.
552 */
553# ifdef IEM_WITH_TLB_STATISTICS
554 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
555# endif
556 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
557 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
558 *pbUnmapInfo = 0;
559 Log7Ex(LOG_GROUP_IEM_MEM,("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
560 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
561 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
562 }
563 }
564 }
565
566 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
567 outdated page pointer, or other troubles. (This will do a TLB load.) */
568 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
569# endif
570 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
571}
572
573
574/**
575 * Inlined flat atomic read-write memory mapping function that longjumps on error.
576 *
577 * Almost identical to RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp).
578 */
579DECL_INLINE_THROW(TMPL_MEM_TYPE *)
580RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,AtJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
581 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
582{
583# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
584 /*
585 * Check that the address doesn't cross a page boundary.
586 */
587# if TMPL_MEM_TYPE_SIZE > 1
588 if (RT_LIKELY(!(GCPtrMem & TMPL_MEM_TYPE_ALIGN))) /* strictly aligned, otherwise fall back to the helper which knows the details. */
589# endif
590 {
591 /*
592 * TLB lookup.
593 */
594 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
595 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
596 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
597 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
598 {
599 /*
600 * Check TLB page table level access flags.
601 */
602 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
603 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
604 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
605 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
606 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
607 | fNoUser))
608 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
609 {
610 /*
611 * Return the address.
612 */
613# ifdef IEM_WITH_TLB_STATISTICS
614 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
615# endif
616 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
617 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
618 *pbUnmapInfo = 0;
619 Log7Ex(LOG_GROUP_IEM_MEM,("IEM AT/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
620 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
621 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
622 }
623 }
624 }
625
626 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
627 outdated page pointer, or other troubles. (This will do a TLB load.) */
628 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
629# endif
630 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
631}
632
633# endif /* TMPL_MEM_WITH_ATOMIC_MAPPING */
634
635/**
636 * Inlined write-only memory mapping function that longjumps on error.
637 */
638DECL_INLINE_THROW(TMPL_MEM_TYPE *)
639RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
640 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
641{
642# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
643 /*
644 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
645 */
646 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
647# if TMPL_MEM_TYPE_SIZE > 1
648 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
649# endif
650 {
651 /*
652 * TLB lookup.
653 */
654 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
655 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
656 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
657 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
658 {
659 /*
660 * Check TLB page table level access flags.
661 */
662 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
663 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
664 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
665 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
666 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
667 | fNoUser))
668 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
669 {
670 /*
671 * Return the address.
672 */
673# ifdef IEM_WITH_TLB_STATISTICS
674 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
675# endif
676 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
677 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
678 *pbUnmapInfo = 0;
679 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
680 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
681 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
682 }
683 }
684 }
685
686 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
687 outdated page pointer, or other troubles. (This will do a TLB load.) */
688 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
689# endif
690 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
691}
692
693
694/**
695 * Inlined flat write-only memory mapping function that longjumps on error.
696 */
697DECL_INLINE_THROW(TMPL_MEM_TYPE *)
698RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
699 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
700{
701# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
702 /*
703 * Check that the address doesn't cross a page boundary.
704 */
705# if TMPL_MEM_TYPE_SIZE > 1
706 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
707# endif
708 {
709 /*
710 * TLB lookup.
711 */
712 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
713 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
714 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
715 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
716 {
717 /*
718 * Check TLB page table level access flags.
719 */
720 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
721 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
722 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
723 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
724 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
725 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
726 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
727 {
728 /*
729 * Return the address.
730 */
731# ifdef IEM_WITH_TLB_STATISTICS
732 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
733# endif
734 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
735 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
736 *pbUnmapInfo = 0;
737 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
738 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
739 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
740 }
741 }
742 }
743
744 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
745 outdated page pointer, or other troubles. (This will do a TLB load.) */
746 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
747# endif
748 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
749}
750
751
752/**
753 * Inlined read-only memory mapping function that longjumps on error.
754 */
755DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
756RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
757 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
758{
759# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
760 /*
761 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
762 */
763 RTGCPTR const GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
764# if TMPL_MEM_TYPE_SIZE > 1
765 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
766# endif
767 {
768 /*
769 * TLB lookup.
770 */
771 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
772 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
773 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
774 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
775 {
776 /*
777 * Check TLB page table level access flags.
778 */
779 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
780 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
781 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
782 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
783 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
784 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
785 {
786 /*
787 * Return the address.
788 */
789# ifdef IEM_WITH_TLB_STATISTICS
790 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
791# endif
792 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
793 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
794 *pbUnmapInfo = 0;
795 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
796 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
797 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
798 }
799 }
800 }
801
802 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
803 outdated page pointer, or other troubles. (This will do a TLB load.) */
804 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
805# endif
806 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
807}
808
809
810/**
811 * Inlined flat read-only memory mapping function that longjumps on error.
812 */
813DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
814RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
815 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
816{
817# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
818 /*
819 * Check that the address doesn't cross a page boundary.
820 */
821# if TMPL_MEM_TYPE_SIZE > 1
822 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
823# endif
824 {
825 /*
826 * TLB lookup.
827 */
828 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
829 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
830 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
831 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
832 {
833 /*
834 * Check TLB page table level access flags.
835 */
836 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
837 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
838 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
839 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
840 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
841 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
842 {
843 /*
844 * Return the address.
845 */
846# ifdef IEM_WITH_TLB_STATISTICS
847 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
848# endif
849 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
850 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
851 *pbUnmapInfo = 0;
852 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
853 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
854 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
855 }
856 }
857 }
858
859 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
860 outdated page pointer, or other troubles. (This will do a TLB load.) */
861 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
862# endif
863 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
864}
865
866#endif /* !TMPL_MEM_NO_MAPPING */
867
868
869/*********************************************************************************************************************************
870* Stack Access *
871*********************************************************************************************************************************/
872#ifdef TMPL_MEM_WITH_STACK
873# if TMPL_MEM_TYPE_SIZE > 8
874# error "Stack not supported for this type size - please #undef TMPL_MEM_WITH_STACK"
875# endif
876# if TMPL_MEM_TYPE_SIZE > 1 && TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
877# error "Stack not supported for this alignment size - please #undef TMPL_MEM_WITH_STACK"
878# endif
879
880/**
881 * Stack store function that longjmps on error.
882 */
883DECL_INLINE_THROW(void)
884RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
885{
886# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
887 /*
888 * Apply segmentation and check that the item doesn't cross a page boundary.
889 */
890 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
891# if TMPL_MEM_TYPE_SIZE > 1
892 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
893# endif
894 {
895 /*
896 * TLB lookup.
897 */
898 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
899 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
900 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
901 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
902 {
903 /*
904 * Check TLB page table level access flags.
905 */
906 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
907 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
908 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
909 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
910 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
911 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
912 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
913 {
914 /*
915 * Do the store and return.
916 */
917# ifdef IEM_WITH_TLB_STATISTICS
918 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
919# endif
920 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
921 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
922 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrEff, uValue));
923 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
924 return;
925 }
926 }
927 }
928
929 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
930 outdated page pointer, or other troubles. (This will do a TLB load.) */
931 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
932# endif
933 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
934}
935
936
937# ifdef TMPL_WITH_PUSH_SREG
938/**
939 * Stack segment store function that longjmps on error.
940 *
941 * For a detailed discussion of the behaviour see the fallback functions
942 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
943 */
944DECL_INLINE_THROW(void)
945RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
946 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
947{
948# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
949 /*
950 * Apply segmentation to the address and check that the item doesn't cross
951 * a page boundary.
952 */
953 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
954# if TMPL_MEM_TYPE_SIZE > 1
955 if (RT_LIKELY( !(GCPtrEff & (sizeof(uint16_t) - 1U))
956 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
957# endif
958 {
959 /*
960 * TLB lookup.
961 */
962 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
963 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
964 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
965 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
966 {
967 /*
968 * Check TLB page table level access flags.
969 */
970 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
971 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
972 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
973 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
974 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
975 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
976 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
977 {
978 /*
979 * Do the push and return.
980 */
981# ifdef IEM_WITH_TLB_STATISTICS
982 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
983# endif
984 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
985 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
986 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrEff, uValue));
987 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
988 return;
989 }
990 }
991 }
992
993 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
994 outdated page pointer, or other troubles. (This will do a TLB load.) */
995 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
996# endif
997 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
998}
999# endif /* TMPL_WITH_PUSH_SREG */
1000
1001
1002/**
1003 * Flat stack store function that longjmps on error.
1004 */
1005DECL_INLINE_THROW(void)
1006RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
1007 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1008{
1009 Assert( IEM_IS_64BIT_CODE(pVCpu)
1010 || ( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
1011 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
1012 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
1013 && pVCpu->cpum.GstCtx.ss.u64Base == 0));
1014
1015# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1016 /*
1017 * Check that the item doesn't cross a page boundary.
1018 */
1019# if TMPL_MEM_TYPE_SIZE > 1
1020 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
1021# endif
1022 {
1023 /*
1024 * TLB lookup.
1025 */
1026 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
1027 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1028 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1029 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1030 {
1031 /*
1032 * Check TLB page table level access flags.
1033 */
1034 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1035 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1036 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1037 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1038 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1039 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1040 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1041 {
1042 /*
1043 * Do the push and return.
1044 */
1045# ifdef IEM_WITH_TLB_STATISTICS
1046 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1047# endif
1048 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1049 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1050 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1051 GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
1052 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
1053 return;
1054 }
1055 }
1056 }
1057
1058 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1059 outdated page pointer, or other troubles. (This will do a TLB load.) */
1060 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
1061# endif
1062 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
1063}
1064
1065# ifdef TMPL_WITH_PUSH_SREG
1066/**
1067 * Flat stack segment store function that longjmps on error.
1068 *
1069 * For a detailed discussion of the behaviour see the fallback functions
1070 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
1071 */
1072DECL_INLINE_THROW(void)
1073RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
1074 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1075{
1076# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1077 /*
1078 * Check that the item doesn't cross a page boundary.
1079 */
1080 if (RT_LIKELY( !(GCPtrMem & (sizeof(uint16_t) - 1))
1081 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, uint16_t) ))
1082 {
1083 /*
1084 * TLB lookup.
1085 */
1086 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
1087 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1088 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1089 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1090 {
1091 /*
1092 * Check TLB page table level access flags.
1093 */
1094 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1095 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1096 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1097 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1098 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1099 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1100 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1101 {
1102 /*
1103 * Do the push and return.
1104 */
1105# ifdef IEM_WITH_TLB_STATISTICS
1106 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1107# endif
1108 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1109 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1110 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1111 GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
1112 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1113 return;
1114 }
1115 }
1116 }
1117
1118 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1119 outdated page pointer, or other troubles. (This will do a TLB load.) */
1120 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
1121# endif
1122 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
1123}
1124# endif /* TMPL_WITH_PUSH_SREG */
1125
1126
1127/**
1128 * Stack fetch function that longjmps on error.
1129 */
1130DECL_INLINE_THROW(TMPL_MEM_TYPE)
1131RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
1132{
1133# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1134 /*
1135 * Apply segmentation to the address and check that the item doesn't cross
1136 * a page boundary.
1137 */
1138 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
1139# if TMPL_MEM_TYPE_SIZE > 1
1140 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1141# endif
1142 {
1143 /*
1144 * TLB lookup.
1145 */
1146 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
1147 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1148 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1149 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1150 {
1151 /*
1152 * Check TLB page table level access flags.
1153 */
1154 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1155 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1156 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1157 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1158 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1159 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1160 {
1161 /*
1162 * Do the pop.
1163 */
1164# ifdef IEM_WITH_TLB_STATISTICS
1165 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1166# endif
1167 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1168 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1169 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1170 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrEff, uValue));
1171 return uValue;
1172 }
1173 }
1174 }
1175
1176 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1177 outdated page pointer, or other troubles. (This will do a TLB load.) */
1178 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1179# endif
1180 return RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem);
1181}
1182
1183
1184/**
1185 * Flat stack fetch function that longjmps on error.
1186 */
1187DECL_INLINE_THROW(TMPL_MEM_TYPE)
1188RT_CONCAT3(iemMemFlatFetchStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
1189{
1190# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1191 /*
1192 * Check that the item doesn't cross a page boundary.
1193 */
1194# if TMPL_MEM_TYPE_SIZE > 1
1195 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
1196# endif
1197 {
1198 /*
1199 * TLB lookup.
1200 */
1201 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
1202 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1203 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1204 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1205 {
1206 /*
1207 * Check TLB page table level access flags.
1208 */
1209 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1210 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1211 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1212 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1213 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1214 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1215 {
1216 /*
1217 * Do the pop.
1218 */
1219# ifdef IEM_WITH_TLB_STATISTICS
1220 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1221# endif
1222 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1223 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1224 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
1225 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
1226 return uValue;
1227 }
1228 }
1229 }
1230
1231 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1232 outdated page pointer, or other troubles. (This will do a TLB load.) */
1233 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
1234# endif
1235 return RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem);
1236}
1237
1238
1239/**
1240 * Stack push function that longjmps on error.
1241 */
1242DECL_INLINE_THROW(void)
1243RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1244{
1245# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1246 /*
1247 * Decrement the stack pointer (prep), apply segmentation and check that
1248 * the item doesn't cross a page boundary.
1249 */
1250 uint64_t uNewRsp;
1251 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
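/* uNewRsp is only committed to GstCtx.rsp after the store below succeeds; the
   SafeJmp fallback at the bottom only receives uValue and so redoes the RSP
   handling itself. */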
1252 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
1253# if TMPL_MEM_TYPE_SIZE > 1
1254 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1255# endif
1256 {
1257 /*
1258 * TLB lookup.
1259 */
1260 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
1261 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1262 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1263 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1264 {
1265 /*
1266 * Check TLB page table level access flags.
1267 */
1268 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1269 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1270 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1271 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1272 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1273 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1274 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1275 {
1276 /*
1277 * Do the push and return.
1278 */
1279# ifdef IEM_WITH_TLB_STATISTICS
1280 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1281# endif
1282 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1283 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1284 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
1285 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1286 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
1287 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1288 return;
1289 }
1290 }
1291 }
1292
1293 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1294 outdated page pointer, or other troubles. (This will do a TLB load.) */
1295 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1296# endif
1297 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1298}
1299
1300
1301/**
1302 * Stack pop greg function that longjmps on error.
1303 */
1304DECL_INLINE_THROW(void)
1305RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1306{
1307 Assert(iGReg < 16);
1308
1309# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1310 /*
1311 * Increment the stack pointer (prep), apply segmentation and check that
1312 * the item doesn't cross a page boundary.
1313 */
1314 uint64_t uNewRsp;
1315 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1316 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
1317# if TMPL_MEM_TYPE_SIZE > 1
1318 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1319# endif
1320 {
1321 /*
1322 * TLB lookup.
1323 */
1324 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
1325 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1326 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1327 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1328 {
1329 /*
1330 * Check TLB page table level access flags.
1331 */
1332 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1333 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1334 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1335 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1336 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1337 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1338 {
1339 /*
1340 * Do the pop.
1341 */
1342# ifdef IEM_WITH_TLB_STATISTICS
1343 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1344# endif
1345 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1346 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1347 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1348 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1349 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
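/* RSP is updated before the register write, presumably so that a 16-bit
   POP SP ends up holding the popped value rather than the incremented
   stack pointer. */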
1350 pVCpu->cpum.GstCtx.rsp = uNewRsp; /* must be first for 16-bit */
1351# if TMPL_MEM_TYPE_SIZE == 2
1352 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1353# elif TMPL_MEM_TYPE_SIZE == 4 || TMPL_MEM_TYPE_SIZE == 8
1354 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1355# else
1356# error "TMPL_MEM_TYPE_SIZE"
1357# endif
1358 return;
1359 }
1360 }
1361 }
1362
1363 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1364 outdated page pointer, or other troubles. (This will do a TLB load.) */
1365 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1366# endif
1367 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1368}
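
/*
 * Illustrative usage sketch (not part of the original template; the concrete
 * names are an assumption based on an instantiation with
 * TMPL_MEM_TYPE=uint64_t and TMPL_MEM_FN_SUFF=U64, in which case the
 * RT_CONCAT3 expansions above yield iemMemStackPushU64Jmp and
 * iemMemStackPopGRegU64Jmp).  A PUSH/POP style instruction body might then
 * call them roughly like this:
 *
 *      iemMemStackPushU64Jmp(pVCpu, pVCpu->cpum.GstCtx.rax);  // push rax
 *      iemMemStackPopGRegU64Jmp(pVCpu, X86_GREG_xBP);         // pop rbp
 *
 * On the fast path these go straight through the ring-3 TLB mapping and
 * update RSP inline; a TLB miss or any special condition defers to the
 * corresponding SafeJmp fallback, which may longjmp on error.
 */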
1369
1370# ifdef TMPL_WITH_PUSH_SREG
1371/**
1372 * Stack segment push function that longjmps on error.
1373 *
1374 * For a detailed discussion of the behaviour see the fallback functions
1375 * iemMemStackPushUxxSRegSafeJmp.
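 *
 * In short, as implemented below: on Intel CPUs outside real mode only a
 * 16-bit write is performed, while otherwise the full operand size is
 * written; in the Intel real-mode case the upper bits of the stored value
 * are taken from the corresponding EFLAGS bits (with the reserved/RAZ bits
 * masked off).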
1376 */
1377DECL_INLINE_THROW(void)
1378RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1379{
1380# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1381 /* See fallback for details on this weirdness: */
1382 bool const fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
1383 uint8_t const cbAccess = fIsIntel && !IEM_IS_REAL_MODE(pVCpu) ? sizeof(uint16_t) : sizeof(TMPL_MEM_TYPE);
1384
1385 /*
1386 * Decrement the stack pointer (prep), apply segmentation and check that
1387 * the item doesn't cross a page boundary.
1388 */
1389 uint64_t uNewRsp;
1390 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1391 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, cbAccess, GCPtrTop);
1392# if TMPL_MEM_TYPE_SIZE > 1
1393 if (RT_LIKELY( !(GCPtrEff & (cbAccess - 1U))
1394 || ( cbAccess == sizeof(uint16_t)
1395 ? TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t)
1396 : TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ) ))
1397# endif
1398 {
1399 /*
1400 * TLB lookup.
1401 */
1402 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
1403 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1404 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1405 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1406 {
1407 /*
1408 * Check TLB page table level access flags.
1409 */
1410 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1411 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1412 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1413 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1414 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1415 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1416 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1417 {
1418 /*
1419 * Do the push and return.
1420 */
1421# ifdef IEM_WITH_TLB_STATISTICS
1422 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1423# endif
1424 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1425 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1426 if (cbAccess == sizeof(uint16_t))
1427 {
1428 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR 'word' SS|%RGv (%RX64->%RX64): %#06x [sreg/i]\n",
1429 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, (uint16_t)uValue));
1430 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1431 }
1432 else
1433 {
1434 TMPL_MEM_TYPE * const puSlot = (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1435 if (fIsIntel)
1436 {
1437 Assert(IEM_IS_REAL_MODE(pVCpu));
1438 uValue = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
1439 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg/ir]\n",
1440 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1441 }
1442 else
1443 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1444 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1445 *puSlot = uValue;
1446 }
1447 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1448 return;
1449 }
1450 }
1451 }
1452
1453 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1454 outdated page pointer, or other troubles. (This will do a TLB load.) */
1455 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1456# endif
1457 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1458}
1459# endif /* TMPL_WITH_PUSH_SREG */
1460
1461# if TMPL_MEM_TYPE_SIZE != 8
1462
1463/**
1464 * 32-bit flat stack push function that longjmps on error.
1465 */
1466DECL_INLINE_THROW(void)
1467RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1468{
1469 Assert( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
1470 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
1471 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
1472 && pVCpu->cpum.GstCtx.ss.u64Base == 0);
1473# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1474 /*
1475 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1476 */
1477 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
1478# if TMPL_MEM_TYPE_SIZE > 1
1479 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewEsp)))
1480# endif
1481 {
1482 /*
1483 * TLB lookup.
1484 */
1485 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV((RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1486 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1487 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1488 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1489 {
1490 /*
1491 * Check TLB page table level access flags.
1492 */
1493 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1494 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1495 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1496 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1497 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1498 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1499 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1500 {
1501 /*
1502 * Do the push and return.
1503 */
1504# ifdef IEM_WITH_TLB_STATISTICS
1505 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1506# endif
1507 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1508 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1509 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE "\n",
1510 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1511 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1512 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1513 return;
1514 }
1515 }
1516 }
1517
1518 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1519 outdated page pointer, or other troubles. (This will do a TLB load.) */
1520 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1521# endif
1522 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1523}
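
/*
 * Added note (restating what the asserts and code above imply): the Flat32
 * variant relies on the asserted flat stack segment (base 0, limit
 * 0xffffffff, read/write accessed, 32-bit), so the effective address is
 * simply the decremented ESP and neither iemRegGetRspForPush nor
 * iemMemApplySegmentToWriteJmp is needed on this path; the TLB probe and
 * fallback handling otherwise mirror the generic push above.
 */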
1524
1525
1526/**
1527 * 32-bit flat stack greg pop function that longjmps on error.
1528 */
1529DECL_INLINE_THROW(void)
1530RT_CONCAT3(iemMemFlat32StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1531{
1532 Assert(iGReg < 16);
1533# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1534 /*
1535 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1536 */
1537 uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
1538# if TMPL_MEM_TYPE_SIZE > 1
1539 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldEsp)))
1540# endif
1541 {
1542 /*
1543 * TLB lookup.
1544 */
1545 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV((RTGCPTR)uOldEsp); /* Cast is required! 2023-08-11 */
1546 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1547 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1548 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1549 {
1550 /*
1551 * Check TLB page table level access flags.
1552 */
1553 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1554 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1555 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1556 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1557 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1558 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1559 {
1560 /*
1561 * Do the pop and update the register values.
1562 */
1563# ifdef IEM_WITH_TLB_STATISTICS
1564 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1565# endif
1566 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1567 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1568 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
1569 pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1570# if TMPL_MEM_TYPE_SIZE == 2
1571 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1572# elif TMPL_MEM_TYPE_SIZE == 4
1573 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1574# else
1575# error "TMPL_MEM_TYPE_SIZE"
1576# endif
1577 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1578 uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1579 return;
1580 }
1581 }
1582 }
1583
1584 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1585 outdated page pointer, or other troubles. (This will do a TLB load.) */
1586 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
1587# endif
1588 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1589}
1590
1591 # endif /* TMPL_MEM_TYPE_SIZE != 8 */
1592
1593# ifdef TMPL_WITH_PUSH_SREG
1594/**
1595 * 32-bit flat stack segment push function that longjmps on error.
1596 *
1597 * For a detailed discussion of the behaviour see the fallback functions
1598 * iemMemStackPushUxxSRegSafeJmp.
1599 */
1600DECL_INLINE_THROW(void)
1601RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1602{
1603# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1604 /* See fallback for details on this weirdness: */
1605 bool const fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
1606 uint8_t const cbAccess = fIsIntel && !IEM_IS_REAL_MODE(pVCpu) ? sizeof(uint16_t) : sizeof(TMPL_MEM_TYPE);
1607
1608 /*
1609 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1610 */
1611 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
1612 if (RT_LIKELY( !(uNewEsp & (cbAccess - 1))
1613 || (cbAccess == sizeof(uint16_t)
1614 ? TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t)
1615 : TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, TMPL_MEM_TYPE)) ))
1616 {
1617 /*
1618 * TLB lookup.
1619 */
1620 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV((RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1621 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1622 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1623 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1624 {
1625 /*
1626 * Check TLB page table level access flags.
1627 */
1628 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1629 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1630 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1631 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1632 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1633 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1634 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1635 {
1636 /*
1637 * Do the push and return.
1638 */
1639# ifdef IEM_WITH_TLB_STATISTICS
1640 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1641# endif
1642 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1643 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1644 if (cbAccess == sizeof(uint16_t))
1645 {
1646 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR 'word' SS|%RX32 (<-%RX32): %#06x [sreg/i]\n",
1647 uNewEsp, pVCpu->cpum.GstCtx.esp, (uint16_t)uValue));
1648 *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1649 }
1650 else
1651 {
1652 TMPL_MEM_TYPE * const puSlot = (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK];
1653 if (fIsIntel)
1654 {
1655 Assert(IEM_IS_REAL_MODE(pVCpu));
1656 uValue = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
1657 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg/ir]\n",
1658 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1659 }
1660 else
1661 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1662 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1663 *puSlot = uValue;
1664 }
1665 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1666 return;
1667 }
1668 }
1669 }
1670
1671 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1672 outdated page pointer, or other troubles. (This will do a TLB load.) */
1673 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1674# endif
1675 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1676}
1677# endif /* TMPL_WITH_PUSH_SREG */
1678
1679# if TMPL_MEM_TYPE_SIZE != 4
1680
1681/**
1682 * 64-bit flat stack push function that longjmps on error.
1683 */
1684DECL_INLINE_THROW(void)
1685RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1686{
1687# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1688 /*
1689 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1690 */
1691 uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
1692# if TMPL_MEM_TYPE_SIZE > 1
1693 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewRsp)))
1694# endif
1695 {
1696 /*
1697 * TLB lookup.
1698 */
1699 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uNewRsp);
1700 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1701 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1702 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1703 {
1704 /*
1705 * Check TLB page table level access flags.
1706 */
1707 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1708 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1709 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1710 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1711 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1712 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1713 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1714 {
1715 /*
1716 * Do the push and return.
1717 */
1718# ifdef IEM_WITH_TLB_STATISTICS
1719 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1720# endif
1721 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1722 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1723 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX64 (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1724 uNewRsp, pVCpu->cpum.GstCtx.rsp, uValue));
1725 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewRsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1726 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1727 return;
1728 }
1729 }
1730 }
1731
1732 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1733 outdated page pointer, or other troubles. (This will do a TLB load.) */
1734 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uNewRsp));
1735# endif
1736 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1737}
1738
1739
1740/**
1741 * 64-bit flat stack greg pop function that longjmps on error.
1742 */
1743DECL_INLINE_THROW(void)
1744RT_CONCAT3(iemMemFlat64StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1745{
1746 Assert(iGReg < 16);
1747# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1748 /*
1749 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1750 */
1751 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
1752# if TMPL_MEM_TYPE_SIZE > 1
1753 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldRsp)))
1754# endif
1755 {
1756 /*
1757 * TLB lookup.
1758 */
1759 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uOldRsp);
1760 PCIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
1761 if (RT_LIKELY( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
1762 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
1763 {
1764 /*
1765 * Check TLB page table level access flags.
1766 */
1767 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1768 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1769 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1770 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1771 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1772 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1773 {
1774 /*
1775 * Do the pop and update the register values.
1776 */
1777# ifdef IEM_WITH_TLB_STATISTICS
1778 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
1779# endif
1780 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1781 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1782 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
1783 pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1784# if TMPL_MEM_TYPE_SIZE == 2
1785 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1786# elif TMPL_MEM_TYPE_SIZE == 8
1787 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1788# else
1789# error "TMPL_MEM_TYPE_SIZE"
1790# endif
1791 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1792 uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1793 return;
1794 }
1795 }
1796 }
1797
1798 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1799 outdated page pointer, or other troubles. (This will do a TLB load.) */
1800 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));
1801# endif
1802 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1803}
1804
1805# endif /* TMPL_MEM_TYPE_SIZE != 4 */
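
/*
 * Added note: per the size guards above, the Flat32 helpers are compiled out
 * for 8-byte items and the Flat64 helpers for 4-byte items, presumably
 * because a flat 32-bit stack never pushes or pops qwords and a 64-bit
 * stack never pushes or pops dwords.
 */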
1806
1807#endif /* TMPL_MEM_WITH_STACK */
1808
1809
1810#undef TMPL_MEM_TYPE
1811#undef TMPL_MEM_TYPE_ALIGN
1812#undef TMPL_MEM_TYPE_SIZE
1813#undef TMPL_MEM_FN_SUFF
1814#undef TMPL_MEM_FMT_TYPE
1815#undef TMPL_MEM_FMT_DESC
1816#undef TMPL_MEM_NO_STORE
1817#undef TMPL_MEM_ALIGN_CHECK
1818#undef TMPL_MEM_BY_REF
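
/*
 * Illustrative instantiation sketch (hypothetical parameter values, not taken
 * from the actual instantiation site): a consumer defines the template
 * parameters and includes this file once per memory type, for example:
 *
 *      #define TMPL_MEM_TYPE       uint32_t
 *      #define TMPL_MEM_TYPE_SIZE  4
 *      #define TMPL_MEM_TYPE_ALIGN 3
 *      #define TMPL_MEM_FN_SUFF    U32
 *      #define TMPL_MEM_FMT_TYPE   "%#010x"
 *      #define TMPL_MEM_FMT_DESC   "dword"
 *      #define TMPL_MEM_WITH_STACK
 *      #include "IEMAllMemRWTmplInline-x86.cpp.h"
 *
 * The #undef block above resets the type-specific parameters, presumably so
 * the file can be included again for a different TMPL_MEM_TYPE.
 */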
1819