VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h @ 101189

Last change on this file since 101189 was 100868, checked in by vboxsync, 16 months ago

VBox/log.h,VMM/IEM: Added a dedicated logging group for IEM memory accesses: IEM_MEM bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.6 KB
/* $Id: IEMAllMemRWTmpl.cpp.h 100868 2023-08-14 00:49:27Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - R/W Memory Functions Template.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/* Check template parameters. */
#ifndef TMPL_MEM_TYPE
# error "TMPL_MEM_TYPE is undefined"
#endif
#ifndef TMPL_MEM_TYPE_ALIGN
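/* Alignment is expressed as a mask, so sizeof(type) - 1 means natural alignment. */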
# define TMPL_MEM_TYPE_ALIGN (sizeof(TMPL_MEM_TYPE) - 1)
#endif
#ifndef TMPL_MEM_FN_SUFF
# error "TMPL_MEM_FN_SUFF is undefined"
#endif
#ifndef TMPL_MEM_FMT_TYPE
# error "TMPL_MEM_FMT_TYPE is undefined"
#endif
#ifndef TMPL_MEM_FMT_DESC
# error "TMPL_MEM_FMT_DESC is undefined"
#endif
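
/*
 * For illustration: the includer is expected to define the parameters above
 * and then pull in this template once per memory type.  A hypothetical
 * instantiation might look like:
 *
 *     #define TMPL_MEM_TYPE       uint32_t
 *     #define TMPL_MEM_FN_SUFF    U32
 *     #define TMPL_MEM_FMT_TYPE   "%#010x"
 *     #define TMPL_MEM_FMT_DESC   "dword"
 *     #include "IEMAllMemRWTmpl.cpp.h"
 *
 * With these parameters, RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF) expands
 * to iemMemFetchDataU32, and likewise for the other function names below.
 */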


/**
 * Standard fetch function.
 *
 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
 * is defined.
 */
VBOXSTRICTRC RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puDst,
                                                         uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(*puSrc), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_DATA_R);
        Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
    }
    return rc;
}
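
/*
 * Hypothetical call site (assuming the U32 instantiation sketched above;
 * GCPtrEffSrc stands in for a previously calculated effective address):
 *
 *     uint32_t     uValue;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &uValue, X86_SREG_DS, GCPtrEffSrc);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */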


#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback fetch function that longjmps on error.
 */
TMPL_MEM_TYPE
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(*puSrc), iSegReg, GCPtrMem,
                                                                     IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    TMPL_MEM_TYPE const uRet = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, (void *)puSrc, IEM_ACCESS_DATA_R);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
    return uRet;
}
#endif /* IEM_WITH_SETJMP */


/**
 * Standard store function.
 *
 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
 * is defined.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStoreData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                                         TMPL_MEM_TYPE uValue) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(*puDst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_DATA_W);
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
    }
    return rc;
}


#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback store function that longjmps on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   uValue      The value to store.
 */
void RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                                          TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(*puDst), iSegReg, GCPtrMem,
                                                         IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
    *puDst = uValue;
    iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_DATA_W);
}
#endif /* IEM_WITH_SETJMP */


#ifdef IEM_WITH_SETJMP

/**
 * Maps a data buffer for read+write direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
    Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
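    /* The unmap info byte encodes the access type in the high nibble and the
       mapping slot in the low nibble; slot zero is used by the TLB-hit path. */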
    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN);
}


/**
 * Maps a data buffer for write-only direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
    Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
}


/**
 * Maps a data buffer for read-only direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE const *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++; /* read-only mapping, so count it on the read path */
# endif
    Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
}

#endif /* IEM_WITH_SETJMP */


#ifdef TMPL_MEM_WITH_STACK

/**
 * Pushes an item onto the stack, regular version.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   uValue  The value to push.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPush,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) RT_NOEXCEPT
{
    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
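    /* Note: this only computes the new top-of-stack address and RSP value;
       RSP is not committed until the write below succeeds. */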

    /* Write the value the lazy way. */
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_STACK_W);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            return VINF_SUCCESS;
        }
    }

    return rc;
}
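
/*
 * Hypothetical call site (assuming a U64 instantiation): a caller would
 * propagate the strict status code in the usual way, e.g.:
 *
 *     VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uValue);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */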


/**
 * Pops an item off the stack.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   puValue Where to store the popped value.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPop,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue) RT_NOEXCEPT
{
    /* Increment the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Read the value the lazy way. */
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, *puValue));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


/**
 * Pushes an item onto the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   uValue  The value to push.
 * @param   pTmpRsp Pointer to the temporary stack pointer.
 */
VBOXSTRICTRC RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Decrement the stack pointer (prep). */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Write the value the lazy way. */
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_STACK_W);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, uValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


/**
 * Pops an item off the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   puValue Where to store the popped value.
 * @param   pTmpRsp Pointer to the temporary stack pointer.
 */
VBOXSTRICTRC
RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Increment the stack pointer. */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Read the value the lazy way. */
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, *puValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


# ifdef IEM_WITH_SETJMP

/**
 * Safe/fallback stack push function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                                         IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    *puDst = uValue;
    iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_STACK_W);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}


/**
 * Safe/fallback stack pop function that longjmps on error.
 */
TMPL_MEM_TYPE RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif

    /* Increment the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Read the data. */
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                                                     IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    TMPL_MEM_TYPE const uRet = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);

    /* Commit the RSP change and return the popped value. */
    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uRet));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;

    return uRet;
}

# ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack push function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    /* The Intel docs talk about zero-extending the selector register
       value.  My actual Intel CPU here might be zero-extending the value,
       but it still only writes the lower word... */
    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
     * happens when crossing a page boundary: is the high word checked
     * for write accessibility or not?  Probably it is.  What about segment limits?
     * It appears this behavior is also shared with trap error codes.
     *
     * Docs indicate the behavior changed maybe in Pentium or Pentium Pro.  Check
     * ancient hardware to pin down when it actually changed. */
    uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
                                               IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
    *puDst = (uint16_t)uValue;
    iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_STACK_W);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}
# endif /* TMPL_WITH_PUSH_SREG */

# endif /* IEM_WITH_SETJMP */

#endif /* TMPL_MEM_WITH_STACK */

/* clean up */
#undef TMPL_MEM_TYPE
#undef TMPL_MEM_TYPE_ALIGN
#undef TMPL_MEM_FN_SUFF
#undef TMPL_MEM_FMT_TYPE
#undef TMPL_MEM_FMT_DESC
#undef TMPL_WITH_PUSH_SREG

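/* The #undefs above let the includer re-instantiate this template with a new
   set of parameters, e.g. once per supported operand size. */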