VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllMemRWTmpl-x86.cpp.h@108434

Last change on this file since 108434 was 108278, checked in by vboxsync, 3 months ago

VMM/IEM: Removed the #ifndef IEM_WITH_SETJMP code. We've had IEM_WITH_SETJMP defined unconditionally since 7.0 and the code probably doesn't even compile w/o it, so best remove the unused code. jiraref:VBP-1531

/* $Id: IEMAllMemRWTmpl-x86.cpp.h 108278 2025-02-18 15:46:53Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - R/W Memory Functions Template.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/* Check template parameters. */
#ifndef TMPL_MEM_TYPE
# error "TMPL_MEM_TYPE is undefined"
#endif
#ifndef TMPL_MEM_TYPE_ALIGN
# define TMPL_MEM_TYPE_ALIGN (sizeof(TMPL_MEM_TYPE) - 1)
#endif
#ifndef TMPL_MEM_FN_SUFF
# error "TMPL_MEM_FN_SUFF is undefined"
#endif
#ifndef TMPL_MEM_FMT_TYPE
# error "TMPL_MEM_FMT_TYPE is undefined"
#endif
#ifndef TMPL_MEM_FMT_DESC
# error "TMPL_MEM_FMT_DESC is undefined"
#endif
#ifndef TMPL_MEM_MAP_FLAGS_ADD
# define TMPL_MEM_MAP_FLAGS_ADD (0)
#endif

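/* Illustrative sketch (not part of the original file): this template header is
   instantiated by the including compilation unit, which defines the parameters
   and then pulls the header in, once per access size/type. The parameter
   values below are assumptions for illustration only. Note that the default
   TMPL_MEM_TYPE_ALIGN above is an alignment mask requesting natural alignment
   (sizeof - 1). */
#if 0 /* example instantiation, assumed values */
# define TMPL_MEM_TYPE      uint32_t
# define TMPL_MEM_FN_SUFF   U32
# define TMPL_MEM_FMT_TYPE  "%#010x"
# define TMPL_MEM_FMT_DESC  "dword"
# include "IEMAllMemRWTmpl-x86.cpp.h"
#endif
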
/**
 * Standard fetch function.
 *
 * This is used by CImpl code.
 */
VBOXSTRICTRC RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puDst,
                                                         uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        *puDst = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
    }
    return rc;
}
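/* What the expansion looks like (assuming the U32 instantiation sketched
   above): RT_CONCAT pastes the two tokens, so the template emits
       VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *puDst, uint8_t iSegReg, RTGCPTR GCPtrMem);
   one such fetch function per instantiation. */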

/**
 * Safe/fallback fetch function that longjmps on error.
 */
#ifdef TMPL_MEM_BY_REF
void
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pDst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
                                                                        IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    *pDst = *pSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst));
}
#else  /* !TMPL_MEM_BY_REF */
TMPL_MEM_TYPE
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
                                                                         IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    TMPL_MEM_TYPE const uRet = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
    return uRet;
}
#endif /* !TMPL_MEM_BY_REF */
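/* Note on the two variants (an inference, not stated in the original file):
   the TMPL_MEM_BY_REF form is for instantiations whose type is too large to
   pass and return by value conveniently (e.g. 128/256-bit types such as
   RTUINT128U); the fetch then fills a caller-provided buffer, and
   TMPL_MEM_FMT_TYPE is expected to format through the pointer. */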


/**
 * Standard store function.
 *
 * This is used by CImpl code.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStoreData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                         TMPL_MEM_TYPE const *pValue) RT_NOEXCEPT
#else
                                                         TMPL_MEM_TYPE uValue) RT_NOEXCEPT
#endif
{
    /* The lazy approach for now... */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(*puDst),
                                iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
#ifdef TMPL_MEM_BY_REF
        *puDst = *pValue;
#else
        *puDst = uValue;
#endif
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
#ifdef TMPL_MEM_BY_REF
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    }
    return rc;
}

/**
 * Stores a data value, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   uValue      The value to store.
 */
void RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                          TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
#else
                                                          TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
#endif
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
#endif
#ifdef TMPL_MEM_BY_REF
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*puDst), iSegReg, GCPtrMem,
                                                             IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
#ifdef TMPL_MEM_BY_REF
    *puDst = *pValue;
#else
    *puDst = uValue;
#endif
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/**
 * Maps a data buffer for atomic read+write direct access (or via a bounce
 * buffer), longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,AtSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
#endif
    Log8(("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                             IEM_ACCESS_DATA_ATOMIC, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
}
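/* A note on the unmap info encoding above (interpretive, based on this code
   rather than separate documentation): the low nibble is a non-zero mapping
   cookie (zero is reserved for direct TLB-hit mappings that need no unmap),
   and the high nibble carries the IEM_ACCESS_TYPE_* flags so
   iemMemCommitAndUnmap*() knows what kind of access it is committing. */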


/**
 * Maps a data buffer for read+write direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
#endif
    Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                             IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
}


/**
 * Maps a data buffer for write-only direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
#endif
    Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                             IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
}


/**
 * Maps a data buffer for read-only direct access (or via a bounce buffer),
 * longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Pointer to unmap info variable.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
TMPL_MEM_TYPE const *
RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
#endif
    Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
    return (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
                                                   IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
}


#ifdef TMPL_MEM_WITH_STACK

/**
 * Pops a general purpose register off the stack.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   iGReg   The GREG to load the popped value into.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPopGReg,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iGReg) RT_NOEXCEPT
{
    Assert(iGReg < 16);

    /* Increment the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Load the word the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        TMPL_MEM_TYPE const uValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the register and new RSP values. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
                pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
            else
                pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
            return VINF_SUCCESS;
        }
    }
    return rc;
}
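/* Worked example for the register write above (hypothetical values): popping
   into RAX with RAX=0xffffffff00001234.  A 64-bit or 32-bit instantiation
   stores via .u, overwriting (and for 32-bit values zero-extending) the whole
   register as x86 requires, while the 16-bit instantiation stores via .u16,
   leaving bits 63:16 untouched. */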


/**
 * Pushes an item onto the stack, regular version.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   uValue  The value to push.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPush,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) RT_NOEXCEPT
{
    /* Decrement the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the value the lazy way. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            return VINF_SUCCESS;
        }
    }

    return rc;
}
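/* Illustration (assumed values): pushing a qword in 64-bit mode with
   RSP=0x7000 makes iemRegGetRspForPush return GCPtrTop=0x6ff8 and
   uNewRsp=0x6ff8.  RSP is only committed after the store succeeds, so a #PF
   raised during the write leaves the guest RSP unchanged. */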


/**
 * Pops a generic item off the stack, regular version.
 *
 * This is used by C-implementation code.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   puValue Where to store the popped value.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPop,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue) RT_NOEXCEPT
{
    /* Increment the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Load the word the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, *puValue));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


/**
 * Pushes an item onto the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   uValue  The value to push.
 * @param   pTmpRsp Pointer to the temporary stack pointer.
 */
VBOXSTRICTRC RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Decrement the stack pointer. */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Write the word the lazy way. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, uValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}


/**
 * Pops an item off the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   puValue Where to store the popped value.
 * @param   pTmpRsp Pointer to the temporary stack pointer.
 */
VBOXSTRICTRC
RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Increment the stack pointer. */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Load the word the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, *puValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}
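/* Usage note (an inference from the interface, not stated in the file): the
   ...Ex variants work on a caller-owned temporary stack pointer instead of
   committing pVCpu->cpum.GstCtx.rsp directly, so multi-item sequences (the
   PUSHA/POPA style implementations, for instance) can advance a scratch RSP
   and commit the real one only once the whole sequence has succeeded. */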


/**
 * Safe/fallback stack store function that longjmps on error.
 */
void RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
                                                           TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrMem,
                                                             IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    *puDst = uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
}


# ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack SREG store function that longjmps on error.
 */
void RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
                                                               TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* bs3-cpu-weird-1 explores this instruction.  The AMD 3990X does it by the
       book, with a zero-extended DWORD write, while my Intel 10980XE goes all
       weird in real mode, where it writes a DWORD with the top word of EFLAGS
       in the top half.  In all other modes it does a WORD access. */

    /** @todo Docs indicate the behavior changed maybe in Pentium or Pentium Pro.
     *        Check ancient hardware when it actually did change. */
    uint8_t bUnmapInfo;
    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    {
        if (!IEM_IS_REAL_MODE(pVCpu))
        {
            /* WORD per Intel specs. */
            uint16_t *puDst = (uint16_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrMem,
                                                           IEM_ACCESS_STACK_W, (sizeof(uint16_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD); /** @todo 2 or 4 alignment check for PUSH SS? */
            *puDst = (uint16_t)uValue;
            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
            Log12(("IEM WR 'word' SS|%RGv: %#06x [sreg/i]\n", GCPtrMem, (uint16_t)uValue));
        }
        else
        {
            /* DWORD real-mode weirdness observed on the 10980XE. */
            /** @todo Check this on other Intel CPUs and when pushing registers other
             *        than FS (which is all that bs3-cpu-weird-1 does atm).  (Maybe this
             *        is something for the CPU profile... Hope not.) */
            uint32_t *puDst = (uint32_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
                                                           IEM_ACCESS_STACK_W, (sizeof(uint32_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD);
            *puDst = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg/ir]\n", GCPtrMem, uValue));
        }
    }
    else
    {
        /* DWORD per spec. */
        uint32_t *puDst = (uint32_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
                                                       IEM_ACCESS_STACK_W, (sizeof(uint32_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD);
        *puDst = uValue;
        iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
        Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrMem, uValue));
    }
}
# endif /* TMPL_WITH_PUSH_SREG */
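/* Worked example for the real-mode Intel path above (assumed register values,
   for illustration only): pushing FS=0x0010 with EFLAGS=0x00210046 stores the
   DWORD 0x00210010: the selector in the low word and the non-RAZ high word of
   EFLAGS (here RF and ID) in the top half, since X86_EFL_RAZ_MASK strips the
   reserved bits 31:22 from the merged value. */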


/**
 * Safe/fallback stack fetch function that longjmps on error.
 */
TMPL_MEM_TYPE RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif

    /* Read the data. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
                                                                         GCPtrMem, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    TMPL_MEM_TYPE const uValue = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Log and return the value. */
    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
    return uValue;
}


/**
 * Safe/fallback stack push function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* Decrement the stack pointer (prep). */
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                                             IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    *puDst = uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}


/**
 * Safe/fallback stack pop greg function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif

    /* Increment the stack pointer. */
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Read the data. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
                                                                         GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    TMPL_MEM_TYPE const uValue = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the register and RSP values. */
    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
    if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
        pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
    else
        pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
}

# ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack SREG push function that longjmps on error.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* Decrement the stack pointer (prep). */
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    /* The Intel docs talk about zero extending the selector register
       value.  My actual Intel CPU here might be zero extending the value
       but it still only writes the lower word... */
    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
     *        happens when crossing a page boundary: is the high word checked
     *        for write accessibility or not?  Probably it is.  What about
     *        segment limits?  It appears this behavior is also shared with
     *        trap error codes.
     *
     *        Docs indicate the behavior changed maybe in Pentium or Pentium
     *        Pro.  Check ancient hardware when it actually did change. */
    uint8_t   bUnmapInfo;
    uint16_t *puDst = (uint16_t *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
                                                   IEM_ACCESS_STACK_W, (sizeof(uint16_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD); /** @todo 2 or 4 alignment check for PUSH SS? */
    *puDst = (uint16_t)uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}
# endif /* TMPL_WITH_PUSH_SREG */

#endif /* TMPL_MEM_WITH_STACK */

/* clean up */
#undef TMPL_MEM_TYPE
#undef TMPL_MEM_TYPE_ALIGN
#undef TMPL_MEM_FN_SUFF
#undef TMPL_MEM_FMT_TYPE
#undef TMPL_MEM_FMT_DESC
#undef TMPL_WITH_PUSH_SREG
#undef TMPL_MEM_MAP_FLAGS_ADD