source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h@102781

Last change on this file since 102781 was 102766, checked in by vboxsync, 15 months ago

VMM/IEM: Reworking native translation of IEM_MC_*PUSH* in prep for doing TLB lookups. bugref:10371

1/* $Id: IEMAllMemRWTmplInline.cpp.h 102766 2024-01-04 20:53:03Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/* Check template parameters. */
30#ifndef TMPL_MEM_TYPE
31# error "TMPL_MEM_TYPE is undefined"
32#endif
33#ifndef TMPL_MEM_TYPE_SIZE
34# error "TMPL_MEM_TYPE_SIZE is undefined"
35#endif
36#ifndef TMPL_MEM_TYPE_ALIGN
37# error "TMPL_MEM_TYPE_ALIGN is undefined"
38#endif
39#ifndef TMPL_MEM_FN_SUFF
40# error "TMPL_MEM_FN_SUFF is undefined"
41#endif
42#ifndef TMPL_MEM_FMT_TYPE
43# error "TMPL_MEM_FMT_TYPE is undefined"
44#endif
45#ifndef TMPL_MEM_FMT_DESC
46# error "TMPL_MEM_FMT_DESC is undefined"
47#endif
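
/* For illustration: this header is a template that the includer instantiates by
 * #defining the parameters checked above before #including it. A dword
 * instantiation could look roughly like the sketch below (the concrete values
 * and include placement are assumptions for illustration, not requirements):
 *
 *   #define TMPL_MEM_TYPE       uint32_t
 *   #define TMPL_MEM_TYPE_SIZE  4
 *   #define TMPL_MEM_TYPE_ALIGN 3
 *   #define TMPL_MEM_FN_SUFF    U32
 *   #define TMPL_MEM_FMT_TYPE   "%#010x"
 *   #define TMPL_MEM_FMT_DESC   "dword"
 *   #include "IEMAllMemRWTmplInline.cpp.h"
 *
 * RT_CONCAT3 below then expands function names such as iemMemFetchDataU32Jmp
 * from TMPL_MEM_FN_SUFF. */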
48
49
50/** Helper for checking if @a a_GCPtr is acceptably aligned and fully within
51 * the page for a TMPL_MEM_TYPE. */
52#if TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
53# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) \
54 && ((a_GCPtr) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)) \
55 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
56#else
57# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */ \
58 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
59#endif
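
/* Worked example, assuming a naturally aligned 4-byte instantiation
 * (TMPL_MEM_TYPE_SIZE 4, TMPL_MEM_TYPE_ALIGN 3): a page offset of 0xffc passes
 * the first operand, since (0xffc & 3) == 0 and a naturally aligned item cannot
 * cross the page; an offset of 0xffd is misaligned (and would also cross the
 * page), so acceptance then depends entirely on the instantiation's
 * TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK. The first #if branch only applies
 * when the required alignment is looser than the type size, where even an
 * "aligned" item may still cross a page boundary. */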
60
61/**
62 * Values have to be passed by reference if larger than uint64_t.
63 *
64 * This is a restriction of the Visual C++ AMD64 calling convention; the gcc
65 * AMD64 and ARM64 ABIs can easily pass and return values up to 128 bits in
66 * registers. For larger values like RTUINT256U, Visual C++ on AMD64 and ARM64
67 * passes them by hidden reference, whereas the gcc AMD64 ABI will use the stack.
68 *
69 * So, to avoid passing anything on the stack, we just explicitly pass values by
70 * reference (pointer) if they are larger than uint64_t. This ASSUMES a 64-bit
71 * host.
72 */
73#if TMPL_MEM_TYPE_SIZE > 8
74# define TMPL_MEM_BY_REF
75#else
76# undef TMPL_MEM_BY_REF
77#endif
78
79
80#ifdef IEM_WITH_SETJMP
81
82
83/*********************************************************************************************************************************
84* Fetches *
85*********************************************************************************************************************************/
86
87/**
88 * Inlined fetch function that longjumps on error.
89 *
90 * @note The @a iSegReg is not allowed to be UINT8_MAX!
91 */
92#ifdef TMPL_MEM_BY_REF
93DECL_INLINE_THROW(void)
94RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
95#else
96DECL_INLINE_THROW(TMPL_MEM_TYPE)
97RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
98#endif
99{
100 AssertCompile(sizeof(TMPL_MEM_TYPE) == TMPL_MEM_TYPE_SIZE);
101# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
102 /*
103 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
104 */
105 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
106# if TMPL_MEM_TYPE_SIZE > 1
107 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
108# endif
109 {
110 /*
111 * TLB lookup.
112 */
113 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
114 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
115 if (RT_LIKELY(pTlbe->uTag == uTag))
116 {
117 /*
118 * Check TLB page table level access flags.
119 */
120 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
121 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
122 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
123 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
124 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
125 {
126 /*
127 * Fetch and return the data.
128 */
129 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
130 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
131 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
132# ifdef TMPL_MEM_BY_REF
133 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
134 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
135 iSegReg, GCPtrMem, GCPtrEff, pValue));
136 return;
137# else
138 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
139 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE "\n",
140 iSegReg, GCPtrMem, GCPtrEff, uRet));
141 return uRet;
142# endif
143 }
144 }
145 }
146
147 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
148 outdated page pointer, or other troubles. (This will do a TLB load.) */
149 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
150# endif
151# ifdef TMPL_MEM_BY_REF
152 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, iSegReg, GCPtrMem);
153# else
154 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem);
155# endif
156}
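
/* Caller sketch (illustrative, assuming the hypothetical U32 instantiation
 * mentioned above): instruction code could fetch a dword operand with
 *
 *   uint32_t const uSrc = iemMemFetchDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrEffSrc);
 *
 * On a data TLB hit the value is read straight from the ring-3 page mapping;
 * otherwise the call falls back to the Safe variant, which performs the full
 * (possibly longjmp'ing) access and reloads the TLB. */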
157
158
159/**
160 * Inlined flat addressing fetch function that longjumps on error.
161 */
162# ifdef TMPL_MEM_BY_REF
163DECL_INLINE_THROW(void)
164RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
165# else
166DECL_INLINE_THROW(TMPL_MEM_TYPE)
167RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
168# endif
169{
170 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
171 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
172 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
173# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
174 /*
175 * Check that it doesn't cross a page boundary.
176 */
177# if TMPL_MEM_TYPE_SIZE > 1
178 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
179# endif
180 {
181 /*
182 * TLB lookup.
183 */
184 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
185 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
186 if (RT_LIKELY(pTlbe->uTag == uTag))
187 {
188 /*
189 * Check TLB page table level access flags.
190 */
191 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
192 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
193 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
194 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
195 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
196 {
197 /*
198 * Fetch and return the data.
199 */
200 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
201 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
202 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
203# ifdef TMPL_MEM_BY_REF
204 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
205 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
206 GCPtrMem, pValue));
207 return;
208# else
209 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
210 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
211 return uRet;
212# endif
213 }
214 }
215 }
216
217 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
218 outdated page pointer, or other troubles. (This will do a TLB load.) */
219 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
220# endif
221# ifdef TMPL_MEM_BY_REF
222 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, UINT8_MAX, GCPtrMem);
223# else
224 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
225# endif
226}
227
228
229/*********************************************************************************************************************************
230* Stores *
231*********************************************************************************************************************************/
232# ifndef TMPL_MEM_NO_STORE
233
234/**
235 * Inlined store function that longjumps on error.
236 *
237 * @note The @a iSegReg is not allowed to be UINT8_MAX!
238 */
239DECL_INLINE_THROW(void)
240RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
241# ifdef TMPL_MEM_BY_REF
242 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
243# else
244 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
245# endif
246{
247# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
248 /*
249 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
250 */
251 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
252# if TMPL_MEM_TYPE_SIZE > 1
253 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
254# endif
255 {
256 /*
257 * TLB lookup.
258 */
259 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
260 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
261 if (RT_LIKELY(pTlbe->uTag == uTag))
262 {
263 /*
264 * Check TLB page table level access flags.
265 */
266 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
267 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
268 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
269 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
270 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
271 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
272 {
273 /*
274 * Store the value and return.
275 */
276 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
277 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
278 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
279# ifdef TMPL_MEM_BY_REF
280 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = *pValue;
281 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs (%04x:%RX64)\n",
282 iSegReg, GCPtrMem, GCPtrEff, pValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
283# else
284 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
285 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE " (%04x:%RX64)\n",
286 iSegReg, GCPtrMem, GCPtrEff, uValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
287# endif
288 return;
289 }
290 }
291 }
292
293 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
294 outdated page pointer, or other troubles. (This will do a TLB load.) */
295 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
296# endif
297# ifdef TMPL_MEM_BY_REF
298 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, pValue);
299# else
300 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
301# endif
302}
303
304
305/**
306 * Inlined flat addressing store function that longjumps on error.
307 */
308DECL_INLINE_THROW(void)
309RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
310# ifdef TMPL_MEM_BY_REF
311 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
312# else
313 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
314# endif
315{
316 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
317 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
318 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
319# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
320 /*
321 * Check that it doesn't cross a page boundary.
322 */
323# if TMPL_MEM_TYPE_SIZE > 1
324 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
325# endif
326 {
327 /*
328 * TLB lookup.
329 */
330 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
331 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
332 if (RT_LIKELY(pTlbe->uTag == uTag))
333 {
334 /*
335 * Check TLB page table level access flags.
336 */
337 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
338 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
339 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
340 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
341 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
342 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
343 {
344 /*
345 * Store the value and return.
346 */
347 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
348 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
349 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
350# ifdef TMPL_MEM_BY_REF
351 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = *pValue;
352 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
353 GCPtrMem, pValue));
354# else
355 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
356 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
357# endif
358 return;
359 }
360 }
361 }
362
363 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
364 outdated page pointer, or other troubles. (This will do a TLB load.) */
365 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
366# endif
367# ifdef TMPL_MEM_BY_REF
368 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, pValue);
369# else
370 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, uValue);
371# endif
372}
373
374# endif /* !TMPL_MEM_NO_STORE */
375
376
377/*********************************************************************************************************************************
378* Mapping / Direct Memory Access *
379*********************************************************************************************************************************/
380# ifndef TMPL_MEM_NO_MAPPING
381
382/**
383 * Inlined read-write memory mapping function that longjumps on error.
384 */
385DECL_INLINE_THROW(TMPL_MEM_TYPE *)
386RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
387 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
388{
389# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
390 /*
391 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
392 */
393 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
394# if TMPL_MEM_TYPE_SIZE > 1
395 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
396# endif
397 {
398 /*
399 * TLB lookup.
400 */
401 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
402 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
403 if (RT_LIKELY(pTlbe->uTag == uTag))
404 {
405 /*
406 * Check TLB page table level access flags.
407 */
408 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
409 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
410 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
411 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
412 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
413 | fNoUser))
414 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
415 {
416 /*
417 * Return the address.
418 */
419 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
420 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
421 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
422 *pbUnmapInfo = 0;
423 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
424 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
425 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
426 }
427 }
428 }
429
430 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
431 outdated page pointer, or other troubles. (This will do a TLB load.) */
432 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
433# endif
434 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
435}
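
/* Usage note (illustrative; the commit/unmap side lives outside this file): on
 * the fast path this and the following Wo/Ro mapping helpers return a pointer
 * straight into the guest page mapping and set *pbUnmapInfo to zero, i.e.
 * nothing to unmap afterwards, while the Safe fallback may hand out a non-zero
 * cookie that must later be given to the matching IEM commit-and-unmap helper.
 * A hypothetical U32 read-modify-write sketch:
 *
 *   uint8_t   bUnmapInfo;
 *   uint32_t *puDst = iemMemMapDataU32RwJmp(pVCpu, &bUnmapInfo, X86_SREG_DS, GCPtrEffDst);
 *   *puDst |= fMask;
 *   ...followed by the matching commit-and-unmap call taking bUnmapInfo... */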
436
437
438/**
439 * Inlined flat read-write memory mapping function that longjumps on error.
440 */
441DECL_INLINE_THROW(TMPL_MEM_TYPE *)
442RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
443 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
444{
445# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
446 /*
447 * Check that the address doesn't cross a page boundary.
448 */
449# if TMPL_MEM_TYPE_SIZE > 1
450 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
451# endif
452 {
453 /*
454 * TLB lookup.
455 */
456 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
457 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
458 if (RT_LIKELY(pTlbe->uTag == uTag))
459 {
460 /*
461 * Check TLB page table level access flags.
462 */
463 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
464 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
465 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
466 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
467 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
468 | fNoUser))
469 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
470 {
471 /*
472 * Return the address.
473 */
474 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
475 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
476 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
477 *pbUnmapInfo = 0;
478 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
479 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
480 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
481 }
482 }
483 }
484
485 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
486 outdated page pointer, or other troubles. (This will do a TLB load.) */
487 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
488# endif
489 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
490}
491
492
493/**
494 * Inlined write-only memory mapping function that longjumps on error.
495 */
496DECL_INLINE_THROW(TMPL_MEM_TYPE *)
497RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
498 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
499{
500# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
501 /*
502 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
503 */
504 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
505# if TMPL_MEM_TYPE_SIZE > 1
506 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
507# endif
508 {
509 /*
510 * TLB lookup.
511 */
512 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
513 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
514 if (RT_LIKELY(pTlbe->uTag == uTag))
515 {
516 /*
517 * Check TLB page table level access flags.
518 */
519 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
520 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
521 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
522 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
523 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
524 | fNoUser))
525 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
526 {
527 /*
528 * Return the address.
529 */
530 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
531 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
532 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
533 *pbUnmapInfo = 0;
534 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
535 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
536 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
537 }
538 }
539 }
540
541 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
542 outdated page pointer, or other troubles. (This will do a TLB load.) */
543 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
544# endif
545 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
546}
547
548
549/**
550 * Inlined flat write-only memory mapping function that longjumps on error.
551 */
552DECL_INLINE_THROW(TMPL_MEM_TYPE *)
553RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
554 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
555{
556# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
557 /*
558 * Check that the address doesn't cross a page boundary.
559 */
560# if TMPL_MEM_TYPE_SIZE > 1
561 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
562# endif
563 {
564 /*
565 * TLB lookup.
566 */
567 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
568 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
569 if (RT_LIKELY(pTlbe->uTag == uTag))
570 {
571 /*
572 * Check TLB page table level access flags.
573 */
574 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
575 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
576 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
577 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
578 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
579 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
580 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
581 {
582 /*
583 * Return the address.
584 */
585 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
586 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
587 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
588 *pbUnmapInfo = 0;
589 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
590 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
591 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
592 }
593 }
594 }
595
596 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
597 outdated page pointer, or other troubles. (This will do a TLB load.) */
598 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
599# endif
600 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
601}
602
603
604/**
605 * Inlined read-only memory mapping function that longjumps on error.
606 */
607DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
608RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
609 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
610{
611# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
612 /*
613 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
614 */
615 RTGCPTR const GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
616# if TMPL_MEM_TYPE_SIZE > 1
617 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
618# endif
619 {
620 /*
621 * TLB lookup.
622 */
623 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
624 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
625 if (RT_LIKELY(pTlbe->uTag == uTag))
626 {
627 /*
628 * Check TLB page table level access flags.
629 */
630 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
631 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
632 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
633 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
634 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
635 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
636 {
637 /*
638 * Return the address.
639 */
640 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
641 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
642 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
643 *pbUnmapInfo = 0;
644 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
645 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
646 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
647 }
648 }
649 }
650
651 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
652 outdated page pointer, or other troubles. (This will do a TLB load.) */
653 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
654# endif
655 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
656}
657
658
659/**
660 * Inlined flat read-only memory mapping function that longjumps on error.
661 */
662DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
663RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
664 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
665{
666# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
667 /*
668 * Check that the address doesn't cross a page boundary.
669 */
670# if TMPL_MEM_TYPE_SIZE > 1
671 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
672# endif
673 {
674 /*
675 * TLB lookup.
676 */
677 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
678 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
679 if (RT_LIKELY(pTlbe->uTag == uTag))
680 {
681 /*
682 * Check TLB page table level access flags.
683 */
684 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
685 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
686 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
687 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
688 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
689 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
690 {
691 /*
692 * Return the address.
693 */
694 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
695 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
696 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
697 *pbUnmapInfo = 0;
698 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
699 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
700 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
701 }
702 }
703 }
704
705 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
706 outdated page pointer, or other troubles. (This will do a TLB load.) */
707 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
708# endif
709 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
710}
711
712# endif /* !TMPL_MEM_NO_MAPPING */
713
714
715/*********************************************************************************************************************************
716* Stack Access *
717*********************************************************************************************************************************/
718# ifdef TMPL_MEM_WITH_STACK
719# if TMPL_MEM_TYPE_SIZE > 8
720# error "Stack not supported for this type size - please #undef TMPL_MEM_WITH_STACK"
721# endif
722# if TMPL_MEM_TYPE_SIZE > 1 && TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
723# error "Stack not supported for this alignment size - please #undef TMPL_MEM_WITH_STACK"
724# endif
725# ifdef IEM_WITH_SETJMP
726
727/**
728 * Stack store function that longjmps on error.
729 */
730DECL_INLINE_THROW(void)
731RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
732{
733# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
734 /*
735 * Apply segmentation and check that the item doesn't cross a page boundary.
736 */
737 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
738# if TMPL_MEM_TYPE_SIZE > 1
739 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
740# endif
741 {
742 /*
743 * TLB lookup.
744 */
745 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
746 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
747 if (RT_LIKELY(pTlbe->uTag == uTag))
748 {
749 /*
750 * Check TLB page table level access flags.
751 */
752 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
753 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
754 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
755 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
756 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
757 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
758 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
759 {
760 /*
761 * Do the store and return.
762 */
763 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
764 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
765 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
766 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrEff, uValue));
767 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
768 return;
769 }
770 }
771 }
772
773 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
774 outdated page pointer, or other troubles. (This will do a TLB load.) */
775 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
776# endif
777 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
778}
779
780
781# ifdef TMPL_WITH_PUSH_SREG
782/**
783 * Stack segment store function that longjmps on error.
784 *
785 * For a detailed discussion of the behaviour see the fallback functions
786 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
787 */
788DECL_INLINE_THROW(void)
789RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
790 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
791{
792# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
793 /*
794 * Apply segmentation and check that the item doesn't cross a page
795 * boundary.
796 */
797 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
798# if TMPL_MEM_TYPE_SIZE > 1
799 if (RT_LIKELY( !(GCPtrEff & (sizeof(uint16_t) - 1U))
800 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
801# endif
802 {
803 /*
804 * TLB lookup.
805 */
806 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
807 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
808 if (RT_LIKELY(pTlbe->uTag == uTag))
809 {
810 /*
811 * Check TLB page table level access flags.
812 */
813 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
814 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
815 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
816 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
817 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
818 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
819 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
820 {
821 /*
822 * Do the push and return.
823 */
824 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
825 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
826 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
827 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrEff, uValue));
828 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
829 return;
830 }
831 }
832 }
833
834 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
835 outdated page pointer, or other troubles. (This will do a TLB load.) */
836 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
837# endif
838 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
839}
840# endif /* TMPL_WITH_PUSH_SREG */
841
842
843/**
844 * Flat stack store function that longjmps on error.
845 */
846DECL_INLINE_THROW(void)
847RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
848 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
849{
850 Assert( IEM_IS_64BIT_CODE(pVCpu)
851 || ( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
852 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
853 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
854 && pVCpu->cpum.GstCtx.ss.u64Base == 0));
855
856# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
857 /*
858 * Check that the item doesn't cross a page boundary.
859 */
860# if TMPL_MEM_TYPE_SIZE > 1
861 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
862# endif
863 {
864 /*
865 * TLB lookup.
866 */
867 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
868 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
869 if (RT_LIKELY(pTlbe->uTag == uTag))
870 {
871 /*
872 * Check TLB page table level access flags.
873 */
874 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
875 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
876 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
877 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
878 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
879 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
880 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
881 {
882 /*
883 * Do the push and return.
884 */
885 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
886 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
887 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
888 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
889 GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
890 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
891 return;
892 }
893 }
894 }
895
896 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
897 outdated page pointer, or other troubles. (This will do a TLB load.) */
898 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
899# endif
900 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
901}
902
903# ifdef TMPL_WITH_PUSH_SREG
904/**
905 * Flat stack segment store function that longjmps on error.
906 *
907 * For a detailed discussion of the behaviour see the fallback functions
908 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
909 */
910DECL_INLINE_THROW(void)
911RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
912 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
913{
914# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
915 /*
916 * Check that the item doesn't cross a page boundary.
917 */
918 if (RT_LIKELY( !(GCPtrMem & (sizeof(uint16_t) - 1))
919 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, uint16_t) ))
920 {
921 /*
922 * TLB lookup.
923 */
924 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
925 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
926 if (RT_LIKELY(pTlbe->uTag == uTag))
927 {
928 /*
929 * Check TLB page table level access flags.
930 */
931 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
932 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
933 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
934 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
935 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
936 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
937 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
938 {
939 /*
940 * Do the push and return.
941 */
942 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
943 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
944 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
945 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
946 GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
947 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
948 return;
949 }
950 }
951 }
952
953 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
954 outdated page pointer, or other troubles. (This will do a TLB load.) */
955 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
956# endif
957 RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
958}
959# endif /* TMPL_WITH_PUSH_SREG */
960
961
962
963/**
964 * Stack push function that longjmps on error.
965 */
966DECL_INLINE_THROW(void)
967RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
968{
969# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
970 /*
971 * Decrement the stack pointer (prep), apply segmentation and check that
972 * the item doesn't cross a page boundary.
973 */
974 uint64_t uNewRsp;
975 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
976 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
977# if TMPL_MEM_TYPE_SIZE > 1
978 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
979# endif
980 {
981 /*
982 * TLB lookup.
983 */
984 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
985 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
986 if (RT_LIKELY(pTlbe->uTag == uTag))
987 {
988 /*
989 * Check TLB page table level access flags.
990 */
991 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
992 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
993 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
994 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
995 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
996 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
997 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
998 {
999 /*
1000 * Do the push and return.
1001 */
1002 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1003 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1004 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1005 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
1006 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1007 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
1008 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1009 return;
1010 }
1011 }
1012 }
1013
1014 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1015 outdated page pointer, or other troubles. (This will do a TLB load.) */
1016 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1017# endif
1018 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1019}
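
/* Illustrative, assuming a hypothetical U64 instantiation: a PUSH implementation
 * would simply call
 *
 *   iemMemStackPushU64Jmp(pVCpu, uValue);
 *
 * Note that the fast path above writes the value first and only commits the new
 * RSP afterwards, so if the access faults and longjmps the guest stack pointer
 * has not yet been modified by this path. */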
1020
1021
1022/**
1023 * Stack pop greg function that longjmps on error.
1024 */
1025DECL_INLINE_THROW(void)
1026RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1027{
1028 Assert(iGReg < 16);
1029
1030# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1031 /*
1032 * Increment the stack pointer (prep), apply segmentation and check that
1033 * the item doesn't cross a page boundary.
1034 */
1035 uint64_t uNewRsp;
1036 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1037 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
1038# if TMPL_MEM_TYPE_SIZE > 1
1039 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
1040# endif
1041 {
1042 /*
1043 * TLB lookup.
1044 */
1045 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
1046 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1047 if (RT_LIKELY(pTlbe->uTag == uTag))
1048 {
1049 /*
1050 * Check TLB page table level access flags.
1051 */
1052 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1053 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1054 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1055 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1056 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1057 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1058 {
1059 /*
1060 * Do the pop.
1061 */
1062 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1063 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1064 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1065 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
1066 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1067 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
1068 pVCpu->cpum.GstCtx.rsp = uNewRsp; /* must be first for 16-bit */
1069# if TMPL_MEM_TYPE_SIZE == 2
1070 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1071# elif TMPL_MEM_TYPE_SIZE == 4 || TMPL_MEM_TYPE_SIZE == 8
1072 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1073# else
1074# error "TMPL_MEM_TYPE_SIZE"
1075# endif
1076 return;
1077 }
1078 }
1079 }
1080
1081 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1082 outdated page pointer, or other troubles. (This will do a TLB load.) */
1083 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1084# endif
1085 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1086}
1087
1088# ifdef TMPL_WITH_PUSH_SREG
1089/**
1090 * Stack segment push function that longjmps on error.
1091 *
1092 * For a detailed discussion of the behaviour see the fallback functions
1093 * iemMemStackPushUxxSRegSafeJmp.
1094 */
1095DECL_INLINE_THROW(void)
1096RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1097{
1098# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1099 /*
1100 * Decrement the stack pointer (prep), apply segmentation and check that
1101 * the item doesn't cross a page boundary.
1102 */
1103 uint64_t uNewRsp;
1104 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
1105 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
1106# if TMPL_MEM_TYPE_SIZE > 1
1107 if (RT_LIKELY( !(GCPtrEff & (sizeof(uint16_t) - 1U))
1108 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
1109# endif
1110 {
1111 /*
1112 * TLB lookup.
1113 */
1114 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
1115 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1116 if (RT_LIKELY(pTlbe->uTag == uTag))
1117 {
1118 /*
1119 * Check TLB page table level access flags.
1120 */
1121 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1122 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1123 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1124 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1125 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1126 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1127 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1128 {
1129 /*
1130 * Do the push and return.
1131 */
1132 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1133 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1134 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1135 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1136 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
1137 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1138 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1139 return;
1140 }
1141 }
1142 }
1143
1144 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1145 outdated page pointer, or other troubles. (This will do a TLB load.) */
1146 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
1147# endif
1148 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1149}
1150# endif /* TMPL_WITH_PUSH_SREG */
1151
1152# if TMPL_MEM_TYPE_SIZE != 8
1153
1154/**
1155 * 32-bit flat stack push function that longjmps on error.
1156 */
1157DECL_INLINE_THROW(void)
1158RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1159{
1160 Assert( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
1161 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
1162 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
1163 && pVCpu->cpum.GstCtx.ss.u64Base == 0);
1164# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1165 /*
1166 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1167 */
1168 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
1169# if TMPL_MEM_TYPE_SIZE > 1
1170 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewEsp)))
1171# endif
1172 {
1173 /*
1174 * TLB lookup.
1175 */
1176 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1177 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1178 if (RT_LIKELY(pTlbe->uTag == uTag))
1179 {
1180 /*
1181 * Check TLB page table level access flags.
1182 */
1183 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1184 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1185 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1186 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1187 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1188 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1189 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1190 {
1191 /*
1192 * Do the push and return.
1193 */
1194 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1195 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1196 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1197 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE "\n",
1198 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1199 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1200 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1201 return;
1202 }
1203 }
1204 }
1205
1206 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1207 outdated page pointer, or other troubles. (This will do a TLB load.) */
1208 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1209# endif
1210 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1211}
1212
1213
1214/**
1215 * 32-bit flat stack greg pop function that longjmps on error.
1216 */
1217DECL_INLINE_THROW(void)
1218RT_CONCAT3(iemMemFlat32StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1219{
1220 Assert(iGReg < 16);
1221# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1222 /*
1223 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1224 */
1225 uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
1226# if TMPL_MEM_TYPE_SIZE > 1
1227 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldEsp)))
1228# endif
1229 {
1230 /*
1231 * TLB lookup.
1232 */
1233 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uOldEsp); /* Cast is required! 2023-08-11 */
1234 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1235 if (RT_LIKELY(pTlbe->uTag == uTag))
1236 {
1237 /*
1238 * Check TLB page table level access flags.
1239 */
1240 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1241 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1242 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1243 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1244 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1245 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1246 {
1247 /*
1248 * Do the pop and update the register values.
1249 */
1250 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1251 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1252 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1253 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
1254 pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1255# if TMPL_MEM_TYPE_SIZE == 2
1256 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1257# elif TMPL_MEM_TYPE_SIZE == 4
1258 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1259# else
1260# error "TMPL_MEM_TYPE_SIZE"
1261# endif
1262 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1263 uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1264 return;
1265 }
1266 }
1267 }
1268
1269 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1270 outdated page pointer, or other troubles. (This will do a TLB load.) */
1271 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
1272# endif
1273 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1274}
1275
1276 # endif /* TMPL_MEM_TYPE_SIZE != 8 */
1277
1278# ifdef TMPL_WITH_PUSH_SREG
1279/**
1280 * 32-bit flat stack segment push function that longjmps on error.
1281 *
1282 * For a detailed discussion of the behaviour see the fallback functions
1283 * iemMemStackPushUxxSRegSafeJmp.
1284 */
1285DECL_INLINE_THROW(void)
1286RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1287{
1288# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1289 /*
1290 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1291 */
1292 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
1293 if (RT_LIKELY( !(uNewEsp & (sizeof(uint16_t) - 1))
1294 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t) ))
1295 {
1296 /*
1297 * TLB lookup.
1298 */
1299 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1300 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1301 if (RT_LIKELY(pTlbe->uTag == uTag))
1302 {
1303 /*
1304 * Check TLB page table level access flags.
1305 */
1306 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1307 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1308 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1309 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1310 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1311 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1312 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1313 {
1314 /*
1315 * Do the push and return.
1316 */
1317 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1318 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1319 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1320 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1321 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1322 *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1323 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1324 return;
1325 }
1326 }
1327 }
1328
1329 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1330 outdated page pointer, or other troubles. (This will do a TLB load.) */
1331 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1332# endif
1333 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1334}
1335# endif /* TMPL_WITH_PUSH_SREG */
1336
1337# if TMPL_MEM_TYPE_SIZE != 4
1338
1339/**
1340 * 64-bit flat stack push function that longjmps on error.
1341 */
1342DECL_INLINE_THROW(void)
1343RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1344{
1345# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1346 /*
1347 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1348 */
1349 uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
1350# if TMPL_MEM_TYPE_SIZE > 1
1351 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewRsp)))
1352# endif
1353 {
1354 /*
1355 * TLB lookup.
1356 */
1357 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uNewRsp);
1358 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1359 if (RT_LIKELY(pTlbe->uTag == uTag))
1360 {
1361 /*
1362 * Check TLB page table level access flags.
1363 */
1364 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1365 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1366 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1367 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1368 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1369 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1370 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1371 {
1372 /*
1373 * Do the push and return.
1374 */
1375 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1376 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1377 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1378 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX64 (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1379 uNewRsp, pVCpu->cpum.GstCtx.rsp, uValue));
1380 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewRsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1381 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1382 return;
1383 }
1384 }
1385 }
1386
1387 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1388 outdated page pointer, or other troubles. (This will do a TLB load.) */
1389 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uNewRsp));
1390# endif
1391 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1392}
1393
1394
1395/**
1396 * 64-bit flat stack pop function that longjmps on error.
1397 */
1398DECL_INLINE_THROW(void)
1399RT_CONCAT3(iemMemFlat64StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1400{
1401 Assert(iGReg < 16);
1402# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1403 /*
1404 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1405 */
1406 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
1407# if TMPL_MEM_TYPE_SIZE > 1
1408 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldRsp)))
1409# endif
1410 {
1411 /*
1412 * TLB lookup.
1413 */
1414 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uOldRsp);
1415 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1416 if (RT_LIKELY(pTlbe->uTag == uTag))
1417 {
1418 /*
1419 * Check TLB page table level access flags.
1420 */
1421 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1422 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1423 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1424 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1425 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1426 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1427 {
1428 /*
1429 * Do the pop and update the register values.
1430 */
1431 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1432 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1433 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1434 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
1435 pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1436# if TMPL_MEM_TYPE_SIZE == 2
1437 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1438# elif TMPL_MEM_TYPE_SIZE == 8
1439 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1440# else
1441# error "TMPL_MEM_TYPE_SIZE"
1442# endif
1443 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1444 uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1445 return;
1446 }
1447 }
1448 }
1449
1450 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1451 outdated page pointer, or other troubles. (This will do a TLB load.) */
1452 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));
1453# endif
1454 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1455}
1456
1457# endif /* TMPL_MEM_TYPE_SIZE != 4 */
1458
1459# endif /* IEM_WITH_SETJMP */
1460# endif /* TMPL_MEM_WITH_STACK */
1461
1462
1463#endif /* IEM_WITH_SETJMP */
1464
1465#undef TMPL_MEM_TYPE
1466#undef TMPL_MEM_TYPE_ALIGN
1467#undef TMPL_MEM_TYPE_SIZE
1468#undef TMPL_MEM_FN_SUFF
1469#undef TMPL_MEM_FMT_TYPE
1470#undef TMPL_MEM_FMT_DESC
1471#undef TMPL_MEM_NO_STORE
1472#undef TMPL_MEM_ALIGN_CHECK
1473#undef TMPL_MEM_BY_REF
1474