
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h@ 102311

Last change on this file: r102311, checked in by vboxsync on 2023-11-27

VMM/IEM: Better memory access logging. bugref:10371

1/* $Id: IEMAllMemRWTmplInline.cpp.h 102311 2023-11-27 12:59:07Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/* Check template parameters. */
30#ifndef TMPL_MEM_TYPE
31# error "TMPL_MEM_TYPE is undefined"
32#endif
33#ifndef TMPL_MEM_TYPE_SIZE
34# error "TMPL_MEM_TYPE_SIZE is undefined"
35#endif
36#ifndef TMPL_MEM_TYPE_ALIGN
37# error "TMPL_MEM_TYPE_ALIGN is undefined"
38#endif
39#ifndef TMPL_MEM_FN_SUFF
40# error "TMPL_MEM_FN_SUFF is undefined"
41#endif
42#ifndef TMPL_MEM_FMT_TYPE
43# error "TMPL_MEM_FMT_TYPE is undefined"
44#endif
45#ifndef TMPL_MEM_FMT_DESC
46# error "TMPL_MEM_FMT_DESC is undefined"
47#endif
48
49#if TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
50# error Have not implemented TMPL_MEM_TYPE_ALIGN smaller than TMPL_MEM_TYPE_SIZE - 1.
51#endif
52
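/*
 * Illustration: a hypothetical instantiation of this template.  The values
 * below are an assumption showing what a 32-bit (dword) instantiation could
 * look like; the real instantiation sites and format strings live elsewhere
 * in the IEM sources.
 *
 *     #define TMPL_MEM_TYPE       uint32_t
 *     #define TMPL_MEM_TYPE_SIZE  4
 *     #define TMPL_MEM_TYPE_ALIGN 3
 *     #define TMPL_MEM_FN_SUFF    U32
 *     #define TMPL_MEM_FMT_TYPE   "%#010x"
 *     #define TMPL_MEM_FMT_DESC   "dword"
 *     #include "IEMAllMemRWTmplInline.cpp.h"
 *
 * With TMPL_MEM_TYPE_ALIGN equal to TMPL_MEM_TYPE_SIZE - 1, an access passing
 * the !(GCPtrEff & TMPL_MEM_TYPE_ALIGN) test below is naturally aligned and
 * therefore cannot cross a guest page boundary, which is what the inlined
 * fast paths rely on.
 */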
53
54#ifdef IEM_WITH_SETJMP
55
56
57/*********************************************************************************************************************************
58* Fetches *
59*********************************************************************************************************************************/
60
61/**
62 * Inlined fetch function that longjumps on error.
63 *
64 * @note The @a iSegReg is not allowed to be UINT8_MAX!
65 */
66DECL_INLINE_THROW(TMPL_MEM_TYPE)
67RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
68{
69 AssertCompile(sizeof(TMPL_MEM_TYPE) == TMPL_MEM_TYPE_SIZE);
70# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
71 /*
72 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
73 */
74 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
75# if TMPL_MEM_TYPE_SIZE > 1
76 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */
77 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
78# endif
79 {
80 /*
81 * TLB lookup.
82 */
83 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
84 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
85 if (RT_LIKELY(pTlbe->uTag == uTag))
86 {
87 /*
88 * Check TLB page table level access flags.
89 */
90 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
91 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
92 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
93 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
94 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
95 {
96 /*
97 * Fetch and return the data.
98 */
99 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
100 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
101 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
102 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
103 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE "\n",
104 iSegReg, GCPtrMem, GCPtrEff, uRet));
105 return uRet;
106 }
107 }
108 }
109
110 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
111 outdated page pointer, or other troubles. (This will do a TLB load.) */
112 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
113# endif
114 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem);
115}
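/*
 * Illustration, assuming a U32 instantiation: the RT_CONCAT3 above produces a
 * function named iemMemFetchDataU32Jmp, and a caller inside IEM would use it
 * roughly like this (sketch only), with any fault reported via longjmp rather
 * than a status code:
 *
 *     uint32_t const uSrc = iemMemFetchDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrSrc);
 *
 * The fNoUser calculation works because IEMTLBE_F_PT_NO_USER is bit 2 (value 4):
 *     CPL 0..2:  (CPL + 1) & 4  =  0    supervisor access, NO_USER is ignored
 *     CPL 3:     (3   + 1) & 4  =  4    user access, NO_USER must be clear
 * so a single AND folds the privilege check into the combined flag comparison
 * against uTlbPhysRev.
 */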
116
117
118/**
119 * Inlined flat addressing fetch function that longjumps on error.
120 */
121DECL_INLINE_THROW(TMPL_MEM_TYPE)
122RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
123{
124# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
125 /*
126 * Check that it doesn't cross a page boundary.
127 */
128# if TMPL_MEM_TYPE_SIZE > 1
129 AssertCompile(X86_CR0_AM == X86_EFL_AC);
130 AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
131 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */
132 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
133# endif
134 {
135 /*
136 * TLB lookup.
137 */
138 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
139 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
140 if (RT_LIKELY(pTlbe->uTag == uTag))
141 {
142 /*
143 * Check TLB page table level access flags.
144 */
145 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
146 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
147 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
148 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
149 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
150 {
151 /*
152 * Fetch and return the data.
153 */
154 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
155 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
156 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
157 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
158 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
159 return uRet;
160 }
161 }
162 }
163
164 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
165 outdated page pointer, or other troubles. (This will do a TLB load.) */
166 LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
167# endif
168 return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
169}
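/*
 * The flat variant above skips the segmentation step; it is intended for code
 * paths where the address is already flat (64-bit mode or a flat 32-bit data
 * segment).  On the fallback it passes UINT8_MAX as the segment register index,
 * the convention used here for flat accesses.  Rough usage sketch, assuming a
 * U32 instantiation:
 *
 *     uint32_t const uSrc = iemMemFlatFetchDataU32Jmp(pVCpu, GCPtrSrc);
 */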
170
171
172/*********************************************************************************************************************************
173* Stores *
174*********************************************************************************************************************************/
175# ifndef TMPL_MEM_NO_STORE
176
177/**
178 * Inlined store function that longjumps on error.
179 *
180 * @note The @a iSegReg is not allowed to be UINT8_MAX!
181 */
182DECL_INLINE_THROW(void)
183RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
184 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
185{
186# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
187 /*
188 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
189 */
190 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
191# if TMPL_MEM_TYPE_SIZE > 1
192 AssertCompile(X86_CR0_AM == X86_EFL_AC);
193 AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
194 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */
195 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
196# endif
197 {
198 /*
199 * TLB lookup.
200 */
201 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
202 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
203 if (RT_LIKELY(pTlbe->uTag == uTag))
204 {
205 /*
206 * Check TLB page table level access flags.
207 */
208 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
209 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
210 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
211 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
212 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
213 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
214 {
215 /*
216 * Store the value and return.
217 */
218 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
219 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
220 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
221 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
222 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE " (%04x:%RX64)\n",
223 iSegReg, GCPtrMem, GCPtrEff, uValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
224 return;
225 }
226 }
227 }
228
229 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
230 outdated page pointer, or other troubles. (This will do a TLB load.) */
231 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
232# endif
233 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
234}
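/*
 * Compared with the fetch path, the store path additionally requires the page
 * to be writable at both the page table level (IEMTLBE_F_PT_NO_WRITE) and the
 * physical level (IEMTLBE_F_PG_NO_WRITE), and the dirty bit to be set already
 * (IEMTLBE_F_PT_NO_DIRTY); if any of that fails it takes the safe fallback
 * path.  Rough usage sketch, assuming a U32 instantiation:
 *
 *     iemMemStoreDataU32Jmp(pVCpu, X86_SREG_ES, GCPtrDst, uValue);
 */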
235
236
237/**
238 * Inlined flat addressing store function that longjumps on error.
239 */
240DECL_INLINE_THROW(void)
241RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
242 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
243{
244# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
245 /*
246 * Check that it doesn't cross a page boundary.
247 */
248# if TMPL_MEM_TYPE_SIZE > 1
249 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
250 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
251# endif
252 {
253 /*
254 * TLB lookup.
255 */
256 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
257 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
258 if (RT_LIKELY(pTlbe->uTag == uTag))
259 {
260 /*
261 * Check TLB page table level access flags.
262 */
263 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
264 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
265 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
266 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
267 | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
268 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
269 {
270 /*
271 * Store the value and return.
272 */
273 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
274 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
275 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
276 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
277 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
278 return;
279 }
280 }
281 }
282
283 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
284 outdated page pointer, or other troubles. (This will do a TLB load.) */
285 Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
286# endif
287 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, uValue);
288}
289
290# endif /* !TMPL_MEM_NO_STORE */
291
292
293/*********************************************************************************************************************************
294* Mapping / Direct Memory Access *
295*********************************************************************************************************************************/
296# ifndef TMPL_MEM_NO_MAPPING
297
298/**
299 * Inlined read-write memory mapping function that longjumps on error.
300 */
301DECL_INLINE_THROW(TMPL_MEM_TYPE *)
302RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
303 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
304{
305# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
306 /*
307 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
308 */
309 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
310# if TMPL_MEM_TYPE_SIZE > 1
311 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
312 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
313# endif
314 {
315 /*
316 * TLB lookup.
317 */
318 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
319 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
320 if (RT_LIKELY(pTlbe->uTag == uTag))
321 {
322 /*
323 * Check TLB page table level access flags.
324 */
325 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
326 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
327 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
328 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
329 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
330 | fNoUser))
331 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
332 {
333 /*
334 * Return the address.
335 */
336 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
337 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
338 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
339 *pbUnmapInfo = 0;
340 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
341 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
342 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
343 }
344 }
345 }
346
347 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
348 outdated page pointer, or other troubles. (This will do a TLB load.) */
349 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
350# endif
351 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
352}
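/*
 * The mapping functions hand out a direct pointer into the ring-3 page mapping
 * instead of copying data, so read-modify-write instructions can operate in
 * place; the unmap cookie is returned in *pbUnmapInfo (zero on this inlined
 * fast path).  Rough usage sketch, assuming a U32 instantiation; the commit
 * and unmap step is handled by a separate helper not shown in this file:
 *
 *     uint8_t   bUnmapInfo;
 *     uint32_t *puDst = iemMemMapDataU32RwJmp(pVCpu, &bUnmapInfo, X86_SREG_ES, GCPtrDst);
 *     *puDst |= fSomeBits;
 *     ... commit and unmap using bUnmapInfo ...
 */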
353
354
355/**
356 * Inlined flat read-write memory mapping function that longjumps on error.
357 */
358DECL_INLINE_THROW(TMPL_MEM_TYPE *)
359RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RwJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
360 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
361{
362# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
363 /*
364 * Check that the address doesn't cross a page boundary.
365 */
366# if TMPL_MEM_TYPE_SIZE > 1
367 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
368 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
369# endif
370 {
371 /*
372 * TLB lookup.
373 */
374 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
375 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
376 if (RT_LIKELY(pTlbe->uTag == uTag))
377 {
378 /*
379 * Check TLB page table level access flags.
380 */
381 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
382 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
383 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
384 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
385 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
386 | fNoUser))
387 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
388 {
389 /*
390 * Return the address.
391 */
392 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
393 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
394 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
395 *pbUnmapInfo = 0;
396 Log7Ex(LOG_GROUP_IEM_MEM,("IEM RW/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
397 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
398 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
399 }
400 }
401 }
402
403 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
404 outdated page pointer, or other troubles. (This will do a TLB load.) */
405 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
406# endif
407 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
408}
409
410
411/**
412 * Inlined write-only memory mapping function that longjumps on error.
413 */
414DECL_INLINE_THROW(TMPL_MEM_TYPE *)
415RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
416 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
417{
418# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
419 /*
420 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
421 */
422 RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
423# if TMPL_MEM_TYPE_SIZE > 1
424 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
425 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
426# endif
427 {
428 /*
429 * TLB lookup.
430 */
431 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
432 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
433 if (RT_LIKELY(pTlbe->uTag == uTag))
434 {
435 /*
436 * Check TLB page table level access flags.
437 */
438 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
439 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
440 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
441 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
442 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
443 | fNoUser))
444 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
445 {
446 /*
447 * Return the address.
448 */
449 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
450 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
451 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
452 *pbUnmapInfo = 0;
453 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
454 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
455 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
456 }
457 }
458 }
459
460 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
461 outdated page pointer, or other troubles. (This will do a TLB load.) */
462 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
463# endif
464 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
465}
466
467
468/**
469 * Inlined flat write-only memory mapping function that longjumps on error.
470 */
471DECL_INLINE_THROW(TMPL_MEM_TYPE *)
472RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,WoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
473 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
474{
475# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
476 /*
477 * Check that the address doesn't cross a page boundary.
478 */
479# if TMPL_MEM_TYPE_SIZE > 1
480 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
481 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
482# endif
483 {
484 /*
485 * TLB lookup.
486 */
487 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
488 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
489 if (RT_LIKELY(pTlbe->uTag == uTag))
490 {
491 /*
492 * Check TLB page table level access flags.
493 */
494 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
495 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
496 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
497 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
498 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
499 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
500 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
501 {
502 /*
503 * Return the address.
504 */
505 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
506 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
507 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
508 *pbUnmapInfo = 0;
509 Log7Ex(LOG_GROUP_IEM_MEM,("IEM WO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
510 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
511 return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
512 }
513 }
514 }
515
516 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
517 outdated page pointer, or other troubles. (This will do a TLB load.) */
518 Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
519# endif
520 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
521}
522
523
524/**
525 * Inlined read-only memory mapping function that longjumps on error.
526 */
527DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
528RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
529 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
530{
531# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
532 /*
533 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
534 */
535 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
536# if TMPL_MEM_TYPE_SIZE > 1
537 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
538 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
539# endif
540 {
541 /*
542 * TLB lookup.
543 */
544 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
545 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
546 if (RT_LIKELY(pTlbe->uTag == uTag))
547 {
548 /*
549 * Check TLB page table level access flags.
550 */
551 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
552 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
553 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
554 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
555 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
556 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
557 {
558 /*
559 * Return the address.
560 */
561 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
562 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
563 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
564 *pbUnmapInfo = 0;
565 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %p\n",
566 iSegReg, GCPtrMem, GCPtrEff, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
567 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
568 }
569 }
570 }
571
572 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
573 outdated page pointer, or other troubles. (This will do a TLB load.) */
574 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
575# endif
576 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
577}
578
579
580/**
581 * Inlined flat read-only memory mapping function that longjumps on error.
582 */
583DECL_INLINE_THROW(TMPL_MEM_TYPE const *)
584RT_CONCAT3(iemMemFlatMapData,TMPL_MEM_FN_SUFF,RoJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
585 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
586{
587# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
588 /*
589 * Check that the address doesn't cross a page boundary.
590 */
591# if TMPL_MEM_TYPE_SIZE > 1
592 if (RT_LIKELY( !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
593 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, TMPL_MEM_TYPE) ))
594# endif
595 {
596 /*
597 * TLB lookup.
598 */
599 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
600 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
601 if (RT_LIKELY(pTlbe->uTag == uTag))
602 {
603 /*
604 * Check TLB page table level access flags.
605 */
606 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
607 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
608 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
609 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
610 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
611 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
612 {
613 /*
614 * Return the address.
615 */
616 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
617 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
618 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
619 *pbUnmapInfo = 0;
620 Log3Ex(LOG_GROUP_IEM_MEM,("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
621 GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
622 return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
623 }
624 }
625 }
626
627 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
628 outdated page pointer, or other troubles. (This will do a TLB load.) */
629 Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
630# endif
631 return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
632}
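/*
 * Summary of the mapping variants generated above.  All of them require on the
 * fast path that IEMTLBE_F_PHYS_REV matches, the ring-3 mapping is present,
 * the page is assigned, the accessed bit is set and the user/supervisor check
 * passes; the access specific flags they additionally require to be clear are:
 *
 *     Rw (read-write):  PG_NO_READ, PG_NO_WRITE, PT_NO_WRITE, PT_NO_DIRTY
 *     Wo (write-only):              PG_NO_WRITE, PT_NO_WRITE, PT_NO_DIRTY
 *     Ro (read-only):   PG_NO_READ
 */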
633
634# endif /* !TMPL_MEM_NO_MAPPING */
635
636
637/*********************************************************************************************************************************
638* Stack Access *
639*********************************************************************************************************************************/
640# ifdef TMPL_MEM_WITH_STACK
641# ifdef IEM_WITH_SETJMP
642
643/**
644 * Stack push function that longjmps on error.
645 */
646DECL_INLINE_THROW(void)
647RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
648{
649# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
650 /*
651 * Decrement the stack pointer (prep), apply segmentation and check that
652 * the item doesn't cross a page boundary.
653 */
654 uint64_t uNewRsp;
655 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
656 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
657# if TMPL_MEM_TYPE_SIZE > 1
658 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
659 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
660# endif
661 {
662 /*
663 * TLB lookup.
664 */
665 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
666 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
667 if (RT_LIKELY(pTlbe->uTag == uTag))
668 {
669 /*
670 * Check TLB page table level access flags.
671 */
672 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
673 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
674 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
675 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
676 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
677 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
678 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
679 {
680 /*
681 * Do the push and return.
682 */
683 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
684 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
685 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
686 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
687 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
688 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
689 pVCpu->cpum.GstCtx.rsp = uNewRsp;
690 return;
691 }
692 }
693 }
694
695 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
696 outdated page pointer, or other troubles. (This will do a TLB load.) */
697 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
698# endif
699 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
700}
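/*
 * On the fast path above the new stack pointer is only committed to
 * pVCpu->cpum.GstCtx.rsp after the store has succeeded, so a push that has to
 * take the fallback (and possibly faults there) has not modified RSP here; the
 * safe worker is expected to do its own RSP handling.  Rough usage sketch,
 * assuming a U32 instantiation:
 *
 *     iemMemStackPushU32Jmp(pVCpu, uValue);
 */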
701
702
703/**
704 * Stack pop function that longjmps on error.
705 */
706DECL_INLINE_THROW(TMPL_MEM_TYPE)
707RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
708{
709# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
710 /*
711 * Increment the stack pointer (prep), apply segmentation and check that
712 * the item doesn't cross a page boundary.
713 */
714 uint64_t uNewRsp;
715 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
716 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
717# if TMPL_MEM_TYPE_SIZE > 1
718 if (RT_LIKELY( !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
719 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
720# endif
721 {
722 /*
723 * TLB lookup.
724 */
725 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
726 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
727 if (RT_LIKELY(pTlbe->uTag == uTag))
728 {
729 /*
730 * Check TLB page table level access flags.
731 */
732 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
733 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
734 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
735 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
736 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
737 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
738 {
739 /*
740 * Do the pop and return.
741 */
742 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
743 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
744 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
745 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
746 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
747 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uRet));
748 pVCpu->cpum.GstCtx.rsp = uNewRsp;
749 return uRet;
750 }
751 }
752 }
753
754 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
755 outdated page pointer, or other troubles. (This will do a TLB load.) */
756 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
757# endif
758 return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
759}
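/*
 * The pop mirrors the push: the value is read first and RSP is only advanced
 * once the read has succeeded, so a faulting pop does not move the stack
 * pointer.  Sketch, assuming a U32 instantiation:
 *
 *     uint32_t const uPopped = iemMemStackPopU32Jmp(pVCpu);
 */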
760
761# ifdef TMPL_WITH_PUSH_SREG
762/**
763 * Stack segment push function that longjmps on error.
764 *
765 * For a detailed discussion of the behaviour see the fallback functions
766 * iemMemStackPushUxxSRegSafeJmp.
767 */
768DECL_INLINE_THROW(void)
769RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
770{
771# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
772 /*
773 * Decrement the stack pointer (prep), apply segmentation and check that
774 * the item doesn't cross a page boundary.
775 */
776 uint64_t uNewRsp;
777 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
778 RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
779# if TMPL_MEM_TYPE_SIZE > 1
780 if (RT_LIKELY( !(GCPtrEff & (sizeof(uint16_t) - 1U))
781 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
782# endif
783 {
784 /*
785 * TLB lookup.
786 */
787 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
788 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
789 if (RT_LIKELY(pTlbe->uTag == uTag))
790 {
791 /*
792 * Check TLB page table level access flags.
793 */
794 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
795 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
796 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
797 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
798 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
799 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
800 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
801 {
802 /*
803 * Do the push and return.
804 */
805 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
806 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
807 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
808 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
809 GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
810 *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
811 pVCpu->cpum.GstCtx.rsp = uNewRsp;
812 return;
813 }
814 }
815 }
816
817 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
818 outdated page pointer, or other troubles. (This will do a TLB load.) */
819 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
820# endif
821 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
822}
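/*
 * The SReg push variant stores only 16 bits and checks alignment against
 * uint16_t regardless of the template type, matching how this code models
 * segment register pushes; the detailed behaviour discussion lives in the
 * iemMemStackPushUxxSRegSafeJmp fallback referenced above.
 */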
823
824# endif
825# if TMPL_MEM_TYPE_SIZE != 8
826
827/**
828 * 32-bit flat stack push function that longjmps on error.
829 */
830DECL_INLINE_THROW(void)
831RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
832{
833 Assert( pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
834 && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
835 && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
836 && pVCpu->cpum.GstCtx.ss.u64Base == 0);
837# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
838 /*
839 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
840 */
841 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
842# if TMPL_MEM_TYPE_SIZE > 1
843 if (RT_LIKELY( !(uNewEsp & TMPL_MEM_TYPE_ALIGN)
844 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, TMPL_MEM_TYPE) ))
845# endif
846 {
847 /*
848 * TLB lookup.
849 */
850 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
851 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
852 if (RT_LIKELY(pTlbe->uTag == uTag))
853 {
854 /*
855 * Check TLB page table level access flags.
856 */
857 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
858 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
859 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
860 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
861 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
862 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
863 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
864 {
865 /*
866 * Do the push and return.
867 */
868 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
869 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
870 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
871 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE "\n",
872 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
873 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = uValue;
874 pVCpu->cpum.GstCtx.rsp = uNewEsp;
875 return;
876 }
877 }
878 }
879
880 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
881 outdated page pointer, or other troubles. (This will do a TLB load.) */
882 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
883# endif
884 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
885}
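/*
 * In the flat 32-bit case the new ESP is used directly as the guest linear
 * address, which is valid because the function asserts a flat expand-up SS
 * (base 0, limit 4 GiB) on entry.  For example, with ESP=0x1000 and a dword
 * push the value is written at linear address 0xFFC before ESP is updated.
 */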
886
887
888/**
889 * 32-bit flat stack pop function that longjmps on error.
890 */
891DECL_INLINE_THROW(TMPL_MEM_TYPE)
892RT_CONCAT3(iemMemFlat32StackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
893{
894# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
895 /*
896 * Use the current stack pointer and check that the item doesn't cross a page boundary.
897 */
898 uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
899# if TMPL_MEM_TYPE_SIZE > 1
900 if (RT_LIKELY( !(uOldEsp & TMPL_MEM_TYPE_ALIGN)
901 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uOldEsp, TMPL_MEM_TYPE) ))
902# endif
903 {
904 /*
905 * TLB lookup.
906 */
907 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uOldEsp); /* Cast is required! 2023-08-11 */
908 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
909 if (RT_LIKELY(pTlbe->uTag == uTag))
910 {
911 /*
912 * Check TLB page table level access flags.
913 */
914 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
915 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
916 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
917 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
918 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
919 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
920 {
921 /*
922 * Do the pop and return.
923 */
924 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
925 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
926 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
927 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
928 pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE);
929 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE "\n",
930 uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uRet));
931 return uRet;
932 }
933 }
934 }
935
936 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
937 outdated page pointer, or other troubles. (This will do a TLB load.) */
938 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
939# endif
940 return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
941}
942
943 # endif /* TMPL_MEM_TYPE_SIZE != 8 */
944# ifdef TMPL_WITH_PUSH_SREG
945/**
946 * 32-bit flat stack segment push function that longjmps on error.
947 *
948 * For a detailed discussion of the behaviour see the fallback functions
949 * iemMemStackPushUxxSRegSafeJmp.
950 */
951DECL_INLINE_THROW(void)
952RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
953{
954# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
955 /*
956 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
957 */
958 uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
959 if (RT_LIKELY( !(uNewEsp & (sizeof(uint16_t) - 1))
960 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t) ))
961 {
962 /*
963 * TLB lookup.
964 */
965 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
966 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
967 if (RT_LIKELY(pTlbe->uTag == uTag))
968 {
969 /*
970 * Check TLB page table level access flags.
971 */
972 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
973 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
974 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
975 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
976 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
977 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
978 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
979 {
980 /*
981 * Do the push and return.
982 */
983 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
984 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
985 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
986 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
987 uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
988 *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
989 pVCpu->cpum.GstCtx.rsp = uNewEsp;
990 return;
991 }
992 }
993 }
994
995 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
996 outdated page pointer, or other troubles. (This will do a TLB load.) */
997 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
998# endif
999 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
1000}
1001
1002# endif
1003# if TMPL_MEM_TYPE_SIZE != 4
1004
1005/**
1006 * 64-bit flat stack push function that longjmps on error.
1007 */
1008DECL_INLINE_THROW(void)
1009RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
1010{
1011# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1012 /*
1013 * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
1014 */
1015 uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
1016# if TMPL_MEM_TYPE_SIZE > 1
1017 if (RT_LIKELY( !(uNewRsp & TMPL_MEM_TYPE_ALIGN)
1018 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewRsp, TMPL_MEM_TYPE) ))
1019# endif
1020 {
1021 /*
1022 * TLB lookup.
1023 */
1024 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uNewRsp);
1025 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1026 if (RT_LIKELY(pTlbe->uTag == uTag))
1027 {
1028 /*
1029 * Check TLB page table level access flags.
1030 */
1031 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1032 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1033 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1034 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
1035 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
1036 | IEMTLBE_F_PT_NO_WRITE | fNoUser))
1037 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1038 {
1039 /*
1040 * Do the push and return.
1041 */
1042 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1043 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1044 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1045 Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX64 (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
1046 uNewRsp, pVCpu->cpum.GstCtx.rsp, uValue));
1047 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewRsp & GUEST_PAGE_OFFSET_MASK] = uValue;
1048 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1049 return;
1050 }
1051 }
1052 }
1053
1054 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1055 outdated page pointer, or other troubles. (This will do a TLB load.) */
1056 Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uNewRsp));
1057# endif
1058 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
1059}
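/*
 * The 64-bit flat stack helpers are only generated for sizes other than 4
 * (see the TMPL_MEM_TYPE_SIZE != 4 guard above), since 64-bit mode has no
 * 32-bit stack operations; RSP itself serves as the linear address here.
 * Sketch, assuming a U64 instantiation:
 *
 *     iemMemFlat64StackPushU64Jmp(pVCpu, uValue);
 */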
1060
1061
1062/**
1063 * 64-bit flat stack pop function that longjmps on error.
1064 */
1065DECL_INLINE_THROW(TMPL_MEM_TYPE)
1066RT_CONCAT3(iemMemFlat64StackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1067{
1068# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
1069 /*
1070 * Use the current stack pointer and check that the item doesn't cross a page boundary.
1071 */
1072 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
1073# if TMPL_MEM_TYPE_SIZE > 1
1074 if (RT_LIKELY( !(uOldRsp & TMPL_MEM_TYPE_ALIGN)
1075 || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uOldRsp, TMPL_MEM_TYPE) ))
1076# endif
1077 {
1078 /*
1079 * TLB lookup.
1080 */
1081 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, uOldRsp);
1082 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
1083 if (RT_LIKELY(pTlbe->uTag == uTag))
1084 {
1085 /*
1086 * Check TLB page table level access flags.
1087 */
1088 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
1089 uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
1090 if (RT_LIKELY( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
1091 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
1092 | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
1093 == pVCpu->iem.s.DataTlb.uTlbPhysRev))
1094 {
1095 /*
1096 * Do the pop and return.
1097 */
1098 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
1099 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
1100 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
1101 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
1102 pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE);
1103 Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE "\n",
1104 uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uRet));
1105 return uRet;
1106 }
1107 }
1108 }
1109
1110 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
1111 outdated page pointer, or other troubles. (This will do a TLB load.) */
1112 Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));
1113# endif
1114 return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
1115}
1116
1117# endif /* TMPL_MEM_TYPE_SIZE != 4 */
1118
1119# endif /* IEM_WITH_SETJMP */
1120# endif /* TMPL_MEM_WITH_STACK */
1121
1122
1123#endif /* IEM_WITH_SETJMP */
1124
1125#undef TMPL_MEM_TYPE
1126#undef TMPL_MEM_TYPE_ALIGN
1127#undef TMPL_MEM_TYPE_SIZE
1128#undef TMPL_MEM_FN_SUFF
1129#undef TMPL_MEM_FMT_TYPE
1130#undef TMPL_MEM_FMT_DESC
1131#undef TMPL_MEM_NO_STORE
1132