
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h@102727

Last change on this file since 102727 was 102572, checked in by vboxsync on 2023-12-11.

VMM/IEM: IEM_MC_POP_Uxx -> IEM_MC_POP_GREG_Uxx, popping by register number instead of reference (pointer). bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 56.7 KB
1/* $Id: IEMAllMemRWTmplInline.cpp.h 102572 2023-12-11 15:20:48Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/* Check template parameters. */
30#ifndef TMPL_MEM_TYPE
31# error "TMPL_MEM_TYPE is undefined"
32#endif
33#ifndef TMPL_MEM_TYPE_SIZE
34# error "TMPL_MEM_TYPE_SIZE is undefined"
35#endif
36#ifndef TMPL_MEM_TYPE_ALIGN
37# error "TMPL_MEM_TYPE_ALIGN is undefined"
38#endif
39#ifndef TMPL_MEM_FN_SUFF
40# error "TMPL_MEM_FN_SUFF is undefined"
41#endif
42#ifndef TMPL_MEM_FMT_TYPE
43# error "TMPL_MEM_FMT_TYPE is undefined"
44#endif
45#ifndef TMPL_MEM_FMT_DESC
46# error "TMPL_MEM_FMT_DESC is undefined"
47#endif
48
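/* Illustrative example (not from the original file): a file instantiating this
 * template is expected to define the parameters checked above and then include
 * this header. A 32-bit "dword" instantiation could look roughly like this
 * (the exact values and the instantiation sites are assumptions):
 *
 *     #define TMPL_MEM_TYPE       uint32_t
 *     #define TMPL_MEM_TYPE_SIZE  4
 *     #define TMPL_MEM_TYPE_ALIGN 3
 *     #define TMPL_MEM_FN_SUFF    U32
 *     #define TMPL_MEM_FMT_TYPE   "%#010x"
 *     #define TMPL_MEM_FMT_DESC   "dword"
 *     #include "IEMAllMemRWTmplInline.cpp.h"
 *
 * The RT_CONCAT3() expansions below then yield functions such as
 * iemMemFetchDataU32Jmp() and iemMemStoreDataU32Jmp().
 */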
49
50/** Helper for checking if @a a_GCPtr is acceptably aligned and fully within
51 * the page for a TMPL_MEM_TYPE. */
52#if TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
53# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) \
54 && ((a_GCPtr) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)) \
55 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
56#else
57# define TMPL_MEM_ALIGN_CHECK(a_GCPtr) ( !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */ \
58 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
59#endif
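/* Worked example (illustrative values): for a naturally aligned dword
 * (TMPL_MEM_TYPE_SIZE 4, TMPL_MEM_TYPE_ALIGN 3) the second form applies, since
 * a 4-byte aligned address can never straddle a guest page. For a hypothetical
 * 16-byte type that only requires 8-byte alignment (ALIGN 7 < SIZE 16) the
 * first form is used and additionally requires the page offset to be at most
 * GUEST_PAGE_SIZE - 16, as an 8-byte aligned address could otherwise still
 * cross into the next page. */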
60
61/**
62 * Values have to be passed by reference if larger than uint64_t.
63 *
64 * This is a restriction of the Visual C++ AMD64 calling convention; the gcc
65 * AMD64 and ARM64 ABIs can pass and return values of up to 128 bits in
66 * registers. For larger values like RTUINT256U, Visual C++ on AMD64 and ARM64
67 * passes them by hidden reference, whereas the gcc AMD64 ABI uses the stack.
68 *
69 * So, to avoid passing anything on the stack, we explicitly pass values by
70 * reference (pointer) if they are larger than uint64_t. This ASSUMES a 64-bit
71 * host.
72 */
73#if TMPL_MEM_TYPE_SIZE > 8
74# define TMPL_MEM_BY_REF
75#else
76# undef TMPL_MEM_BY_REF
77#endif
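/* For instance, a 128-bit instantiation (TMPL_MEM_TYPE RTUINT128U,
 * TMPL_MEM_TYPE_SIZE 16) would take the by-reference path here, while uint64_t
 * and smaller types are passed and returned by value. (Illustrative note.) */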
78
79
80#ifdef IEM_WITH_SETJMP
81
82
83/*********************************************************************************************************************************
84* Fetches *
85*********************************************************************************************************************************/
86
87/**
88 * Inlined fetch function that longjumps on error.
89 *
90 * @note The @a iSegReg is not allowed to be UINT8_MAX!
91 */
92#ifdef TMPL_MEM_BY_REF
93DECL_INLINE_THROW(void)
94RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
95#else
96DECL_INLINE_THROW(TMPL_MEM_TYPE)
97RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
98#endif
99{
100 AssertCompile(sizeof(TMPL_MEM_TYPE) == TMPL_MEM_TYPE_SIZE);
101# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
102 /*
103 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
104 */
105 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
106# if TMPL_MEM_TYPE_SIZE > 1
107 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
108# endif
109 {
110 /*
111 * TLB lookup.
112 */
113 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
114 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
115 if (RT_LIKELY(pTlbe->uTag == uTag))
116 {
117 /*
118 * Check TLB page table level access flags.
119 */
120 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
121 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
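 /* Note: IEM_GET_CPL() + 1 has bit 2 (== IEMTLBE_F_PT_NO_USER) set exactly when
    CPL is 3, so the NO_USER page-table restriction below only participates in
    the check for user-mode (CPL 3) accesses. */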
122 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
123 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
124 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
125 {
126 /*
127 * Fetch and return the data.
128 */
129 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
130 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
131 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
132# ifdef TMPL_MEM_BY_REF
133 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
134 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
135 iSegReg, GCPtrMem, GCPtrEff, pValue));
136 return;
137# else
138 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
139 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE "\n",
140 iSegReg, GCPtrMem, GCPtrEff, uRet));
141 return uRet;
142# endif
143 }
144 }
145 }
146
147 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
148 outdated page pointer, or other troubles. (This will do a TLB load.) */
149 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
150# endif
151# ifdef TMPL_MEM_BY_REF
152 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, iSegReg, GCPtrMem);
153# else
154 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem);
155# endif
156}
157
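/* Usage sketch (illustrative, assuming a U32 instantiation): an instruction
 * implementation would typically fetch a dword operand along these lines,
 * either directly or through the IEM_MC_FETCH_MEM_U32() layer:
 *
 *     uint32_t const uSrc = iemMemFetchDataU32Jmp(pVCpu, iEffSeg, GCPtrEffSrc);
 *
 * On a data TLB hit this resolves to the inline body above; otherwise it ends
 * up in the SafeJmp fallback, which may longjmp to raise a guest exception.
 */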
158
159/**
160 * Inlined flat addressing fetch function that longjumps on error.
161 */
162# ifdef TMPL_MEM_BY_REF
163DECL_INLINE_THROW(void)
164RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
165# else
166DECL_INLINE_THROW(TMPL_MEM_TYPE)
167RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
168# endif
169{
170 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
171 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
172 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
173# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
174 /*
175 * Check that it doesn't cross a page boundary.
176 */
177# if TMPL_MEM_TYPE_SIZE > 1
178 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
179# endif
180 {
181 /*
182 * TLB lookup.
183 */
184 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
185 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
186 if (RT_LIKELY(pTlbe->uTag == uTag))
187 {
188 /*
189 * Check TLB page table level access flags.
190 */
191 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
192 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
193 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
194 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
195 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
196 {
197 /*
198 * Fetch and return the data.
199 */
200 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
201 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
202 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
203# ifdef TMPL_MEM_BY_REF
204 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
205 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
206 GCPtrMem, pValue));
207 return;
208# else
209 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
210 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
211 return uRet;
212# endif
213 }
214 }
215 }
216
217 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
218 outdated page pointer, or other troubles. (This will do a TLB load.) */
219 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
220# endif
221# ifdef TMPL_MEM_BY_REF
222 RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, UINT8_MAX, GCPtrMem);
223# else
224 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
225# endif
226}
227
228
229/*********************************************************************************************************************************
230* Stores *
231*********************************************************************************************************************************/
232# ifndef TMPL_MEM_NO_STORE
233
234/**
235 * Inlined store function that longjumps on error.
236 *
237 * @note The @a iSegReg is not allowed to be UINT8_MAX!
238 */
239DECL_INLINE_THROW(void)
240RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
241# ifdef TMPL_MEM_BY_REF
242 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
243# else
244 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
245# endif
246{
247# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
248 /*
249 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
250 */
251 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
252# if TMPL_MEM_TYPE_SIZE > 1
253 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
254# endif
255 {
256 /*
257 * TLB lookup.
258 */
259 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
260 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
261 if (RT_LIKELY(pTlbe->uTag == uTag))
262 {
263 /*
264 * Check TLB page table level access flags.
265 */
266 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
267 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
268 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
269 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
270 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
271 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
272 {
273 /*
274 * Store the value and return.
275 */
276 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
277 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
278 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
279# ifdef TMPL_MEM_BY_REF
280 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = *pValue;
281 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs (%04x:%RX64)\n",
282 iSegReg, GCPtrMem, GCPtrEff, pValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
283# else
284 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
285 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE " (%04x:%RX64)\n",
286 iSegReg, GCPtrMem, GCPtrEff, uValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
287# endif
288 return;
289 }
290 }
291 }
292
293 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
294 outdated page pointer, or other troubles. (This will do a TLB load.) */
295 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
296# endif
297# ifdef TMPL_MEM_BY_REF
298 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, pValue);
299# else
300 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
301# endif
302}
303
304
305/**
306 * Inlined flat addressing store function that longjumps on error.
307 */
308DECL_INLINE_THROW(void)
309RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
310# ifdef TMPL_MEM_BY_REF
311 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
312# else
313 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
314# endif
315{
316 AssertMsg( (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
317 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
318 || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
319# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
320 /*
321 * Check that it doesn't cross a page boundary.
322 */
323# if TMPL_MEM_TYPE_SIZE > 1
324 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
325# endif
326 {
327 /*
328 * TLB lookup.
329 */
330 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
331 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
332 if (RT_LIKELY(pTlbe->uTag == uTag))
333 {
334 /*
335 * Check TLB page table level access flags.
336 */
337 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
338 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
339 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
340 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
341 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
342 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
343 {
344 /*
345 * Store the value and return.
346 */
347 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
348 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
349 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
350# ifdef TMPL_MEM_BY_REF
351 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = *pValue;
352 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
353 GCPtrMem, pValue));
354# else
355 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
356 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
357# endif
358 return;
359 }
360 }
361 }
362
363 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
364 outdated page pointer, or other troubles. (This will do a TLB load.) */
365 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
366# endif
367# ifdef TMPL_MEM_BY_REF
368 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, pValue);
369# else
370 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, uValue);
371# endif
372}
373
374# endif /* !TMPL_MEM_NO_STORE */
375
376
377/*********************************************************************************************************************************
378* Mapping / Direct Memory Access *
379*********************************************************************************************************************************/
380# ifndef TMPL_MEM_NO_MAPPING
381
382/**
383 * Inlined read-write memory mapping function that longjumps on error.
384 */
385DECL_INLINE_THROW(TMPL_MEM_TYPE *)
386RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
387 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
388{
389# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
390 /*
391 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
392 */
393 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
394# if TMPL_MEM_TYPE_SIZE > 1
395 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
396# endif
397 {
398 /*
399 * TLB lookup.
400 */
401 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
402 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
403 if (RT_LIKELY(pTlbe->uTag == uTag))
404 {
405 /*
406 * Check TLB page table level access flags.
407 */
408 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
409 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
410 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
411 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
412 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
413 | fNoUser))
414 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
415 {
416 /*
417 * Return the address.
418 */
419 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
420 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
421 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
422 *pbUnmapInfo = 0;
423 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
424 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
425 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
426 }
427 }
428 }
429
430 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
431 outdated page pointer, or other troubles. (This will do a TLB load.) */
432 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
433# endif
434 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
435}
436
437
438/**
439 * Inlined flat read-write memory mapping function that longjumps on error.
440 */
441DECL_INLINE_THROW(TMPL_MEM_TYPE *)
442RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
443 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
444{
445# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
446 /*
447 * Check that the address doesn't cross a page boundary.
448 */
449# if TMPL_MEM_TYPE_SIZE > 1
450 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
451# endif
452 {
453 /*
454 * TLB lookup.
455 */
456 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
457 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
458 if (RT_LIKELY(pTlbe->uTag == uTag))
459 {
460 /*
461 * Check TLB page table level access flags.
462 */
463 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
464 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
465 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
466 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
467 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
468 | fNoUser))
469 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
470 {
471 /*
472 * Return the address.
473 */
474 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
475 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
476 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
477 *pbUnmapInfo = 0;
478 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
479 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
480 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
481 }
482 }
483 }
484
485 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
486 outdated page pointer, or other troubles. (This will do a TLB load.) */
487 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
488# endif
489 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
490}
491
492
493/**
494 * Inlined write-only memory mapping function that longjumps on error.
495 */
496DECL_INLINE_THROW(TMPL_MEM_TYPE *)
497RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
498 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
499{
500# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
501 /*
502 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
503 */
504 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
505# if TMPL_MEM_TYPE_SIZE > 1
506 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
507# endif
508 {
509 /*
510 * TLB lookup.
511 */
512 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
513 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
514 if (RT_LIKELY(pTlbe->uTag == uTag))
515 {
516 /*
517 * Check TLB page table level access flags.
518 */
519 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
520 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
521 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
522 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
523 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
524 | fNoUser))
525 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
526 {
527 /*
528 * Return the address.
529 */
530 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
531 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
532 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
533 *pbUnmapInfo = 0;
534 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
535 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
536 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
537 }
538 }
539 }
540
541 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
542 outdated page pointer, or other troubles. (This will do a TLB load.) */
543 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
544# endif
545 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
546}
547
548
549/**
550 * Inlined flat write-only memory mapping function that longjumps on error.
551 */
552DECL_INLINE_THROW(TMPL_MEM_TYPE *)
553RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
554 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
555{
556# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
557 /*
558 * Check that the address doesn't cross a page boundary.
559 */
560# if TMPL_MEM_TYPE_SIZE > 1
561 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
562# endif
563 {
564 /*
565 * TLB lookup.
566 */
567 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
568 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
569 if (RT_LIKELY(pTlbe->uTag == uTag))
570 {
571 /*
572 * Check TLB page table level access flags.
573 */
574 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
575 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
576 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
577 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
578 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
579 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
580 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
581 {
582 /*
583 * Return the address.
584 */
585 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
586 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
587 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
588 *pbUnmapInfo = 0;
589 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
590 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
591 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
592 }
593 }
594 }
595
596 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
597 outdated page pointer, or other troubles. (This will do a TLB load.) */
598 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
599# endif
600 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
601}
602
603
604/**
605 * Inlined read-only memory mapping function that longjumps on error.
606 */
607DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
608RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
609 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
610{
611# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
612 /*
613 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
614 */
615 RTGCPTR const GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
616# if TMPL_MEM_TYPE_SIZE > 1
617 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
618# endif
619 {
620 /*
621 * TLB lookup.
622 */
623 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
624 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
625 if (RT_LIKELY(pTlbe->uTag == uTag))
626 {
627 /*
628 * Check TLB page table level access flags.
629 */
630 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
631 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
632 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
633 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
634 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
635 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
636 {
637 /*
638 * Return the address.
639 */
640 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
641 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
642 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
643 *pbUnmapInfo = 0;
644 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
645 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
646 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
647 }
648 }
649 }
650
651 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
652 outdated page pointer, or other troubles. (This will do a TLB load.) */
653 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
654# endif
655 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
656}
657
658
659/**
660 * Inlined flat read-only memory mapping function that longjumps on error.
661 */
662DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
663RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
664 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
665{
666# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
667 /*
668 * Check that the address doesn't cross a page boundary.
669 */
670# if TMPL_MEM_TYPE_SIZE > 1
671 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
672# endif
673 {
674 /*
675 * TLB lookup.
676 */
677 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
678 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
679 if (RT_LIKELY(pTlbe->uTag == uTag))
680 {
681 /*
682 * Check TLB page table level access flags.
683 */
684 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
685 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
686 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
687 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
688 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
689 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
690 {
691 /*
692 * Return the address.
693 */
694 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
695 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
696 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
697 *pbUnmapInfo = 0;
698 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
699 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
700 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
701 }
702 }
703 }
704
705 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
706 outdated page pointer, or other troubles. (This will do a TLB load.) */
707 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
708# endif
709 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
710}
711
712# endif /* !TMPL_MEM_NO_MAPPING */
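/* Usage sketch (illustrative, assuming a U32 instantiation; the commit helper
 * name is likewise an assumption): read-modify-write access via the mapping
 * functions above is paired with a commit-and-unmap step, roughly:
 *
 *     uint8_t   bUnmapInfo;
 *     uint32_t *pu32Dst = iemMemMapDataU32RwJmp(pVCpu, &bUnmapInfo, iEffSeg, GCPtrEff);
 *     *pu32Dst += 1;
 *     iemMemCommitAndUnmapRwJmp(pVCpu, bUnmapInfo);
 */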
713
714
715/*********************************************************************************************************************************
716* Stack Access *
717*********************************************************************************************************************************/
718# ifdef TMPL_MEM_WITH_STACK
719# if TMPL_MEM_TYPE_SIZE > 8
720# error "Stack not supported for this type size - please #undef TMPL_MEM_WITH_STACK"
721# endif
722# if TMPL_MEM_TYPE_SIZE > 1 && TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
723# error "Stack not supported for this alignment size - please #undef TMPL_MEM_WITH_STACK"
724# endif
725# ifdef IEM_WITH_SETJMP
726
727/**
728 * Stack push function that longjmps on error.
729 */
730DECL_INLINE_THROW(void)
731RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
732{
733# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
734 /*
735 * Decrement the stack pointer (prep), apply segmentation and check that
736 * the item doesn't cross a page boundary.
737 */
738 uint64_t uNewRsp;
739 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
740 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
741# if TMPL_MEM_TYPE_SIZE > 1
742 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
743# endif
744 {
745 /*
746 * TLB lookup.
747 */
748 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
749 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
750 if (RT_LIKELY(pTlbe->uTag == uTag))
751 {
752 /*
753 * Check TLB page table level access flags.
754 */
755 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
756 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
757 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
758 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
759 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
760 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
761 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
762 {
763 /*
764 * Do the push and return.
765 */
766 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
767 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
768 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
769 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
770 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
771 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
772 pVCpu->cpum.GstCtx.rsp = uNewRsp;
773 return;
774 }
775 }
776 }
777
778 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
779 outdated page pointer, or other troubles. (This will do a TLB load.) */
780 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
781# endif
782 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
783}
784
785
786/**
787 * Stack pop greg function that longjmps on error.
788 */
789DECL_INLINE_THROW(void)
790RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
791{
792 Assert(iGReg < 16);
793
794# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
795 /*
796 * Increment the stack pointer (prep), apply segmentation and check that
797 * the item doesn't cross a page boundary.
798 */
799 uint64_t uNewRsp;
800 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
801 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
802# if TMPL_MEM_TYPE_SIZE > 1
803 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
804# endif
805 {
806 /*
807 * TLB lookup.
808 */
809 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
810 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
811 if (RT_LIKELY(pTlbe->uTag == uTag))
812 {
813 /*
814 * Check TLB page table level access flags.
815 */
816 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
817 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
818 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
819 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
820 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
821 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
822 {
823 /*
824 * Do the pop.
825 */
826 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
827 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
828 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
829 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
830 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
831 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
832 pVCpu->cpum.GstCtx.rsp = uNewRsp; /* must be first for 16-bit */
833# if TMPL_MEM_TYPE_SIZE == 2
834 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
835# elif TMPL_MEM_TYPE_SIZE == 4 || TMPL_MEM_TYPE_SIZE == 8
836 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
837# else
838# error "TMPL_MEM_TYPE_SIZE"
839# endif
840 return;
841 }
842 }
843 }
844
845 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
846 outdated page pointer, or other troubles. (This will do a TLB load.) */
847 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
848# endif
849 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
850}
851
852# ifdef TMPL_WITH_PUSH_SREG
853/**
854 * Stack segment push function that longjmps on error.
855 *
856 * For a detailed discussion of the behaviour see the fallback functions
857 * iemMemStackPushUxxSRegSafeJmp.
858 */
859DECL_INLINE_THROW(void)
860RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
861{
862# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
863 /*
864 * Decrement the stack pointer (prep), apply segmentation and check that
865 * the item doesn't cross a page boundary.
866 */
867 uint64_t uNewRsp;
868 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
869 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
870# if TMPL_MEM_TYPE_SIZE > 1
871 if (RT_LIKELY( !(GCPtrEff & (sizeof(uint16_t) - 1U))
872 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
873# endif
874 {
875 /*
876 * TLB lookup.
877 */
878 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
879 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
880 if (RT_LIKELY(pTlbe->uTag == uTag))
881 {
882 /*
883 * Check TLB page table level access flags.
884 */
885 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
886 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
887 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
888 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
889 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
890 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
891 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
892 {
893 /*
894 * Do the push and return.
895 */
896 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
897 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
898 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
899 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
900 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
901 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
902 pVCpu->cpum.GstCtx.rsp = uNewRsp;
903 return;
904 }
905 }
906 }
907
908 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
909 outdated page pointer, or other troubles. (This will do a TLB load.) */
910 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
911# endif
912 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
913}
914
915# endif
916# if TMPL_MEM_TYPE_SIZE != 8
917
918/**
919 * 32-bit flat stack push function that longjmps on error.
920 */
921DECL_INLINE_THROW(void)
922RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
923{
924 Assert( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
925 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
926 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
927 && pVCpu->cpum.GstCtx.ss.u64Base == 0);
928# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
929 /*
930 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
931 */
932 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
933# if TMPL_MEM_TYPE_SIZE > 1
934 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewEsp)))
935# endif
936 {
937 /*
938 * TLB lookup.
939 */
940 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
941 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
942 if (RT_LIKELY(pTlbe->uTag == uTag))
943 {
944 /*
945 * Check TLB page table level access flags.
946 */
947 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
948 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
949 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
950 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
951 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
952 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
953 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
954 {
955 /*
956 * Do the push and return.
957 */
958 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
959 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
960 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
961 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE "\n",
962 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
963 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = uValue;
964 pVCpu->cpum.GstCtx.rsp = uNewEsp;
965 return;
966 }
967 }
968 }
969
970 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
971 outdated page pointer, or other troubles. (This will do a TLB load.) */
972 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
973# endif
974 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
975}
976
977
978/**
979 * 32-bit flat stack greg pop function that longjmps on error.
980 */
981DECL_INLINE_THROW(void)
982RT_CONCAT3(iemMemFlat32StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
983{
984 Assert(iGReg < 16);
985# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
986 /*
987 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
988 */
989 uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
990# if TMPL_MEM_TYPE_SIZE > 1
991 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldEsp)))
992# endif
993 {
994 /*
995 * TLB lookup.
996 */
997 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uOldEsp); /* Cast is required! 2023-08-11 */
998 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
999 if (RT_LIKELY(pTlbe->uTag == uTag))
1000 {
1001 /*
1002 * Check TLB page table level access flags.
1003 */
1004 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1005 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1006 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1007 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1008 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1009 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1010 {
1011 /*
1012 * Do the pop and update the register values.
1013 */
1014 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1015 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1016 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1017 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
1018 pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1019# if TMPL_MEM_TYPE_SIZE == 2
1020 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1021# elif TMPL_MEM_TYPE_SIZE == 4
1022 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1023# else
1024# error "TMPL_MEM_TYPE_SIZE"
1025# endif
1026 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1027 uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1028 return;
1029 }
1030 }
1031 }
1032
1033 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1034 outdated page pointer, or other troubles. (This will do a TLB load.) */
1035 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
1036# endif
1037 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1038}
1039
1040# endif /* TMPL_MEM_TYPE_SIZE != 8 */
1041# ifdef TMPL_WITH_PUSH_SREG
1042/**
1043 * 32-bit flat stack segment push function that longjmps on error.
1044 *
1045 * For a detailed discussion of the behaviour see the fallback functions
1046 * iemMemStackPushUxxSRegSafeJmp.
1047 */
1048DECL_INLINE_THROW(void)
1049RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1050{
1051# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1052 /*
1053 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1054 */
1055 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
1056 if (RT_LIKELY( !(uNewEsp & (sizeof(uint16_t) - 1))
1057 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t) ))
1058 {
1059 /*
1060 * TLB lookup.
1061 */
1062 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
1063 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1064 if (RT_LIKELY(pTlbe->uTag == uTag))
1065 {
1066 /*
1067 * Check TLB page table level access flags.
1068 */
1069 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1070 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1071 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1072 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1073 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1074 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1075 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1076 {
1077 /*
1078 * Do the push and return.
1079 */
1080 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1081 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1082 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1083 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
1084 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
1085 *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
1086 pVCpu->cpum.GstCtx.rsp = uNewEsp;
1087 return;
1088 }
1089 }
1090 }
1091
1092 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1093 outdated page pointer, or other troubles. (This will do a TLB load.) */
1094 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
1095# endif
1096 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1097}
1098
1099# endif
1100# if TMPL_MEM_TYPE_SIZE != 4
1101
1102/**
1103 * 64-bit flat stack push function that longjmps on error.
1104 */
1105DECL_INLINE_THROW(void)
1106RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1107{
1108# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1109 /*
1110 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1111 */
1112 uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
1113# if TMPL_MEM_TYPE_SIZE > 1
1114 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewRsp)))
1115# endif
1116 {
1117 /*
1118 * TLB lookup.
1119 */
1120 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uNewRsp);
1121 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1122 if (RT_LIKELY(pTlbe->uTag == uTag))
1123 {
1124 /*
1125 * Check TLB page table level access flags.
1126 */
1127 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1128 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1129 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1130 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1131 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1132 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1133 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1134 {
1135 /*
1136 * Do the push and return.
1137 */
1138 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1139 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1140 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1141 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX64 (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1142 uNewRsp, pVCpu->cpum.GstCtx.rsp, uValue));
1143 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewRsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1144 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1145 return;
1146 }
1147 }
1148 }
1149
1150 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1151 outdated page pointer, or other troubles. (This will do a TLB load.) */
1152 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uNewRsp));
1153# endif
1154 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1155}
1156
1157
1158/**
1159 * 64-bit flat stack pop function that longjmps on error.
1160 */
1161DECL_INLINE_THROW(void)
1162RT_CONCAT3(iemMemFlat64StackPopGReg,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
1163{
1164 Assert(iGReg < 16);
1165# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1166 /*
1167 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1168 */
1169 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
1170# if TMPL_MEM_TYPE_SIZE > 1
1171 if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldRsp)))
1172# endif
1173 {
1174 /*
1175 * TLB lookup.
1176 */
1177 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uOldRsp);
1178 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1179 if (RT_LIKELY(pTlbe->uTag == uTag))
1180 {
1181 /*
1182 * Check TLB page table level access flags.
1183 */
1184 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1185 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1186 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1187 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1188 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1189 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1190 {
1191 /*
1192 * Do the pop and update the register value.
1193 */
1194 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1195 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1196 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1197 TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
1198 pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
1199# if TMPL_MEM_TYPE_SIZE == 2
1200 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
1201# elif TMPL_MEM_TYPE_SIZE == 8
1202 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
1203# else
1204# error "TMPL_MEM_TYPE_SIZE"
1205# endif
1206 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
1207 uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
1208 return;
1209 }
1210 }
1211 }
1212
1213 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1214 outdated page pointer, or other troubles. (This will do a TLB load.) */
1215 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));
1216# endif
1217 RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
1218}
1219
1220# endif /* TMPL_MEM_TYPE_SIZE != 4 */
1221
1222# endif /* IEM_WITH_SETJMP */
1223# endif /* TMPL_MEM_WITH_STACK */
1224
1225
1226#endif /* IEM_WITH_SETJMP */
1227
1228#undef TMPL_MEM_TYPE
1229#undef TMPL_MEM_TYPE_ALIGN
1230#undef TMPL_MEM_TYPE_SIZE
1231#undef TMPL_MEM_FN_SUFF
1232#undef TMPL_MEM_FMT_TYPE
1233#undef TMPL_MEM_FMT_DESC
1234#undef TMPL_MEM_NO_STORE
1235#undef TMPL_MEM_ALIGN_CHECK
1236#undef TMPL_MEM_BY_REF
1237