VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h@ 102555

Last change on this file since 102555 was 102430, checked in by vboxsync, 16 months ago

VMM/IEM: Refactored iemMemMap and friends to work with bUnmapInfo / bMapInfo. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.5 KB
Line 
1/* $Id: IEMAllMemRWTmpl.cpp.h 102430 2023-12-02 02:39:20Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
/* Check template parameters.  This file is a multiple-inclusion template: the
   includer defines the TMPL_MEM_* macros, includes this file, and the macros
   are #undef'ed again at the bottom. */
#ifndef TMPL_MEM_TYPE
# error "TMPL_MEM_TYPE is undefined"
#endif
#ifndef TMPL_MEM_TYPE_ALIGN
/* Default alignment spec: sizeof - 1, i.e. presumably the natural-alignment
   mask form expected by iemMemMap/iemMemMapJmp — TODO confirm against the
   iemMemMap parameter docs. */
# define TMPL_MEM_TYPE_ALIGN (sizeof(TMPL_MEM_TYPE) - 1)
#endif
#ifndef TMPL_MEM_FN_SUFF
# error "TMPL_MEM_FN_SUFF is undefined"
#endif
#ifndef TMPL_MEM_FMT_TYPE
# error "TMPL_MEM_FMT_TYPE is undefined"
#endif
#ifndef TMPL_MEM_FMT_DESC
# error "TMPL_MEM_FMT_DESC is undefined"
#endif
45
46
47/**
48 * Standard fetch function.
49 *
50 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
51 * is defined.
52 */
53VBOXSTRICTRC RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puDst,
54 uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
55{
56 /* The lazy approach for now... */
57 uint8_t bUnmapInfo;
58 TMPL_MEM_TYPE const *puSrc;
59 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
60 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
61 if (rc == VINF_SUCCESS)
62 {
63 *puDst = *puSrc;
64 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
65 Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
66 }
67 return rc;
68}
69
70
#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback fetch function that longjmps on error.
 *
 * Maps the guest memory read-only, copies the value out and unmaps again;
 * any failure is reported via longjmp rather than a return code.
 */
# ifdef TMPL_MEM_BY_REF
void
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pDst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++; /* statistics: safe (slow) read path taken */
#  endif
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
                                                                    IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    /* Copy while mapped, then release the mapping. */
    *pDst = *pSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    /* Note: TMPL_MEM_FMT_TYPE for by-ref types appears to take a pointer (pDst). */
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst));
}
# else  /* !TMPL_MEM_BY_REF */
TMPL_MEM_TYPE
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++; /* statistics: safe (slow) read path taken */
#  endif
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
                                                                     IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    /* Copy while mapped, then release the mapping before returning. */
    TMPL_MEM_TYPE const uRet = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
    return uRet;
}
# endif /* !TMPL_MEM_BY_REF */
#endif /* IEM_WITH_SETJMP */
106
107
108
/**
 * Standard store function.
 *
 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
 * is defined.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for this
 *                      access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   pValue/uValue  The value to store; passed by reference when
 *                      TMPL_MEM_BY_REF is defined, by value otherwise.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStoreData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                         TMPL_MEM_TYPE const *pValue) RT_NOEXCEPT
#else
                                                         TMPL_MEM_TYPE uValue) RT_NOEXCEPT
#endif
{
    /* The lazy approach for now: map writable, store, commit and unmap. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(*puDst),
                                iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
#ifdef TMPL_MEM_BY_REF
        *puDst = *pValue;
#else
        *puDst = uValue;
#endif
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
#ifdef TMPL_MEM_BY_REF
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    }
    return rc;
}
143
144
#ifdef IEM_WITH_SETJMP
/**
 * Stores a data item of TMPL_MEM_TYPE size, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   uValue      The value to store (pValue, by reference, when
 *                      TMPL_MEM_BY_REF is defined).
 */
void RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                          TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
#else
                                                          TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
#endif
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++; /* statistics: safe (slow) write path taken */
# endif
    /* Log up front: a failing mapping below will longjmp out of here. */
#ifdef TMPL_MEM_BY_REF
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puDst), iSegReg, GCPtrMem,
                                                         IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
#ifdef TMPL_MEM_BY_REF
    *puDst = *pValue;
#else
    *puDst = uValue;
#endif
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}
#endif /* IEM_WITH_SETJMP */
181
182
183#ifdef IEM_WITH_SETJMP
184
185/**
186 * Maps a data buffer for read+write direct access (or via a bounce buffer),
187 * longjmp on error.
188 *
189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
190 * @param pbUnmapInfo Pointer to unmap info variable.
191 * @param iSegReg The index of the segment register to use for
192 * this access. The base and limits are checked.
193 * @param GCPtrMem The address of the guest memory.
194 */
195TMPL_MEM_TYPE *
196RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
197 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
198{
199# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
200 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
201# endif
202 Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
203 *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
204 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
205 IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN);
206}
207
208
209/**
210 * Maps a data buffer for writeonly direct access (or via a bounce buffer),
211 * longjmp on error.
212 *
213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
214 * @param pbUnmapInfo Pointer to unmap info variable.
215 * @param iSegReg The index of the segment register to use for
216 * this access. The base and limits are checked.
217 * @param GCPtrMem The address of the guest memory.
218 */
219TMPL_MEM_TYPE *
220RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
221 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
222{
223# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
224 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
225# endif
226 Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
227 *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
228 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
229 IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
230}
231
232
233/**
234 * Maps a data buffer for readonly direct access (or via a bounce buffer),
235 * longjmp on error.
236 *
237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
238 * @param pbUnmapInfo Pointer to unmap info variable.
239 * @param iSegReg The index of the segment register to use for
240 * this access. The base and limits are checked.
241 * @param GCPtrMem The address of the guest memory.
242 */
243TMPL_MEM_TYPE const *
244RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
245 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
246{
247# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
248 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
249# endif
250 Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
251 *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
252 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
253 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
254}
255
256#endif /* IEM_WITH_SETJMP */
257
258
259#ifdef TMPL_MEM_WITH_STACK
260
261/**
262 * Pushes an item onto the stack, regular version.
263 *
264 * @returns Strict VBox status code.
265 * @param pVCpu The cross context virtual CPU structure of the
266 * calling thread.
267 * @param uValue The value to push.
268 */
269VBOXSTRICTRC RT_CONCAT(iemMemStackPush,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) RT_NOEXCEPT
270{
271 /* Increment the stack pointer. */
272 uint64_t uNewRsp;
273 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
274
275 /* Write the dword the lazy way. */
276 uint8_t bUnmapInfo;
277 TMPL_MEM_TYPE *puDst;
278 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
279 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
280 if (rc == VINF_SUCCESS)
281 {
282 *puDst = uValue;
283 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
284
285 /* Commit the new RSP value unless we an access handler made trouble. */
286 if (rc == VINF_SUCCESS)
287 {
288 Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
289 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
290 pVCpu->cpum.GstCtx.rsp = uNewRsp;
291 return VINF_SUCCESS;
292 }
293 }
294
295 return rc;
296}
297
298
299/**
300 * Pops an item off the stack.
301 *
302 * @returns Strict VBox status code.
303 * @param pVCpu The cross context virtual CPU structure of the
304 * calling thread.
305 * @param puValue Where to store the popped value.
306 */
307VBOXSTRICTRC RT_CONCAT(iemMemStackPop,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue) RT_NOEXCEPT
308{
309 /* Increment the stack pointer. */
310 uint64_t uNewRsp;
311 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
312
313 /* Write the word the lazy way. */
314 uint8_t bUnmapInfo;
315 TMPL_MEM_TYPE const *puSrc;
316 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
317 IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
318 if (rc == VINF_SUCCESS)
319 {
320 *puValue = *puSrc;
321 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
322
323 /* Commit the new RSP value. */
324 if (rc == VINF_SUCCESS)
325 {
326 Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
327 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, *puValue));
328 pVCpu->cpum.GstCtx.rsp = uNewRsp;
329 return VINF_SUCCESS;
330 }
331 }
332 return rc;
333}
334
335
/**
 * Pushes an item onto the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu    The cross context virtual CPU structure of the
 *                   calling thread.
 * @param   uValue   The value to push.
 * @param   pTmpRsp  Pointer to the temporary stack pointer.  Only written
 *                   back when the push fully succeeds.
 */
VBOXSTRICTRC RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Calculate the new temporary stack pointer (decremented for the push). */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Write the item the lazy way: map writable, store, commit and unmap. */
    uint8_t bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, uValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}
372
373
/**
 * Pops an item off the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu    The cross context virtual CPU structure of the
 *                   calling thread.
 * @param   puValue  Where to store the popped value.
 * @param   pTmpRsp  Pointer to the temporary stack pointer.  Only written
 *                   back when the pop fully succeeds.
 */
VBOXSTRICTRC
RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Calculate the new temporary stack pointer (incremented for the pop). */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Read the item the lazy way: map read-only, copy, commit and unmap. */
    uint8_t bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, *puValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}
411
412
413# ifdef IEM_WITH_SETJMP
414
/**
 * Safe/fallback stack push function that longjmps on error.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   uValue  The value to push.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++; /* statistics: safe (slow) write path taken */
# endif

    /* Decrement the stack pointer (prep); RSP itself is committed at the end. */
    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data.  A failure here longjmps out, leaving RSP untouched. */
    uint8_t bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                                         IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    *puDst = uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}
440
441
/**
 * Safe/fallback stack pop function that longjmps on error.
 *
 * @returns The popped value.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
TMPL_MEM_TYPE RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++; /* statistics: safe (slow) read path taken */
# endif

    /* Increment the stack pointer (prep); RSP itself is committed at the end. */
    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Read the data.  A failure here longjmps out, leaving RSP untouched. */
    uint8_t bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
                                                                    GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    TMPL_MEM_TYPE const uRet = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change and return the popped value. */
    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uRet));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;

    return uRet;
}
469
# ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack push function that longjmps on error.
 *
 * Segment-register variant: only the low 16 bits of @a uValue are written
 * (see the hardware-behavior note in the body).
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   uValue  The value to push; only the low word is stored.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++; /* statistics: safe (slow) write path taken */
# endif

    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    /* The intel docs talks about zero extending the selector register
       value.  My actual intel CPU here might be zero extending the value
       but it still only writes the lower word... */
    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
     * happens when crossing an electric page boundary, is the high word checked
     * for write accessibility or not?  Probably it is.  What about segment limits?
     * It appears this behavior is also shared with trap error codes.
     *
     * Docs indicate the behavior changed maybe in Pentium or Pentium Pro.  Check
     * ancient hardware when it actually did change. */
    uint8_t bUnmapInfo;
    uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
                                               IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
    *puDst = (uint16_t)uValue; /* only the low word is written */
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}
# endif /* TMPL_WITH_PUSH_SREG */
507
508# endif /* IEM_WITH_SETJMP */
509
510#endif /* TMPL_MEM_WITH_STACK */
511
/* Clean up: undefine all template parameters so this file can be included
   again with a different TMPL_MEM_TYPE. */
#undef TMPL_MEM_TYPE
#undef TMPL_MEM_TYPE_ALIGN
#undef TMPL_MEM_FN_SUFF
#undef TMPL_MEM_FMT_TYPE
#undef TMPL_MEM_FMT_DESC
#undef TMPL_WITH_PUSH_SREG
519
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette