VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h@ 102727

Last change on this file since 102727 was 102576, checked in by vboxsync, 15 months ago

VMM/IEM: Build fix. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.3 KB
Line 
1/* $Id: IEMAllMemRWTmpl.cpp.h 102576 2023-12-11 20:08:01Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
/* Check template parameters. */
#ifndef TMPL_MEM_TYPE
# error "TMPL_MEM_TYPE is undefined"
#endif
#ifndef TMPL_MEM_TYPE_ALIGN
/* Default alignment mask: natural alignment of the type.  This is only a
   valid mask if sizeof(TMPL_MEM_TYPE) is a power of two. */
# define TMPL_MEM_TYPE_ALIGN (sizeof(TMPL_MEM_TYPE) - 1)
#endif
#ifndef TMPL_MEM_FN_SUFF
# error "TMPL_MEM_FN_SUFF is undefined"
#endif
#ifndef TMPL_MEM_FMT_TYPE
# error "TMPL_MEM_FMT_TYPE is undefined"
#endif
#ifndef TMPL_MEM_FMT_DESC
# error "TMPL_MEM_FMT_DESC is undefined"
#endif
45
46
47/**
48 * Standard fetch function.
49 *
50 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
51 * is defined.
52 */
53VBOXSTRICTRC RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puDst,
54 uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
55{
56 /* The lazy approach for now... */
57 uint8_t bUnmapInfo;
58 TMPL_MEM_TYPE const *puSrc;
59 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
60 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
61 if (rc == VINF_SUCCESS)
62 {
63 *puDst = *puSrc;
64 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
65 Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
66 }
67 return rc;
68}
69
70
#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback fetch function that longjmps on error.
 *
 * Slow path used when the inlined/TLB variants cannot handle the access;
 * iemMemMapJmp and iemMemCommitAndUnmapJmp longjmp on failure instead of
 * returning a status code, so there is no error path here.
 */
# ifdef TMPL_MEM_BY_REF
void
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pDst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++; /* statistics: slow (non-TLB) read path taken */
# endif
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
                                                                    IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    *pDst = *pSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    /* NOTE(review): logs the pointer pDst - presumably TMPL_MEM_FMT_TYPE takes a
       pointer for by-ref types; confirm against the instantiation sites. */
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst));
}
# else /* !TMPL_MEM_BY_REF */
TMPL_MEM_TYPE
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++; /* statistics: slow (non-TLB) read path taken */
# endif
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
                                                                     IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    TMPL_MEM_TYPE const uRet = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
    return uRet;
}
# endif /* !TMPL_MEM_BY_REF */
#endif /* IEM_WITH_SETJMP */
106
107
108
/**
 * Standard store function.
 *
 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
 * is defined.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg  The segment register to address through.
 * @param   GCPtrMem The guest address to store at.
 * @param   pValue / uValue  The value to store, passed by reference or by
 *                   value depending on TMPL_MEM_BY_REF.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStoreData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                         TMPL_MEM_TYPE const *pValue) RT_NOEXCEPT
#else
                                                         TMPL_MEM_TYPE uValue) RT_NOEXCEPT
#endif
{
    /* The lazy approach for now... */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(*puDst),
                                iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
#ifdef TMPL_MEM_BY_REF
        *puDst = *pValue;
#else
        *puDst = uValue;
#endif
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
#ifdef TMPL_MEM_BY_REF
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    }
    return rc;
}
143
144
#ifdef IEM_WITH_SETJMP
/**
 * Stores a data item, longjmp on error.
 *
 * Safe/fallback path; iemMemMapJmp and iemMemCommitAndUnmapJmp longjmp on
 * failure instead of returning a status code.
 *
 * @param pVCpu    The cross context virtual CPU structure of the calling thread.
 * @param iSegReg  The index of the segment register to use for
 *                 this access.  The base and limits are checked.
 * @param GCPtrMem The address of the guest memory.
 * @param pValue / uValue  The value to store, passed by reference or by
 *                 value depending on TMPL_MEM_BY_REF.
 */
void RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                          TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
#else
                                                          TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
#endif
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++; /* statistics: slow (non-TLB) write path taken */
# endif
    /* Logged up front since the map/commit calls below longjmp on failure. */
#ifdef TMPL_MEM_BY_REF
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puDst), iSegReg, GCPtrMem,
                                                         IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
#ifdef TMPL_MEM_BY_REF
    *puDst = *pValue;
#else
    *puDst = uValue;
#endif
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}
#endif /* IEM_WITH_SETJMP */
181
182
183#ifdef IEM_WITH_SETJMP
184
185/**
186 * Maps a data buffer for read+write direct access (or via a bounce buffer),
187 * longjmp on error.
188 *
189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
190 * @param pbUnmapInfo Pointer to unmap info variable.
191 * @param iSegReg The index of the segment register to use for
192 * this access. The base and limits are checked.
193 * @param GCPtrMem The address of the guest memory.
194 */
195TMPL_MEM_TYPE *
196RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
197 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
198{
199# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
200 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
201# endif
202 Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
203 *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
204 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
205 IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN);
206}
207
208
209/**
210 * Maps a data buffer for writeonly direct access (or via a bounce buffer),
211 * longjmp on error.
212 *
213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
214 * @param pbUnmapInfo Pointer to unmap info variable.
215 * @param iSegReg The index of the segment register to use for
216 * this access. The base and limits are checked.
217 * @param GCPtrMem The address of the guest memory.
218 */
219TMPL_MEM_TYPE *
220RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
221 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
222{
223# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
224 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
225# endif
226 Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
227 *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
228 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
229 IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
230}
231
232
233/**
234 * Maps a data buffer for readonly direct access (or via a bounce buffer),
235 * longjmp on error.
236 *
237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
238 * @param pbUnmapInfo Pointer to unmap info variable.
239 * @param iSegReg The index of the segment register to use for
240 * this access. The base and limits are checked.
241 * @param GCPtrMem The address of the guest memory.
242 */
243TMPL_MEM_TYPE const *
244RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
245 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
246{
247# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
248 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
249# endif
250 Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
251 *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
252 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
253 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
254}
255
256#endif /* IEM_WITH_SETJMP */
257
258
259#ifdef TMPL_MEM_WITH_STACK
260
/**
 * Pops a general purpose register off the stack.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   iGReg   The GREG to load the popped value into.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPopGReg,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iGReg) RT_NOEXCEPT
{
    Assert(iGReg < 16);

    /* Increment the stack pointer (calculation only; RSP is committed below
       once the read has succeeded). */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Load the word the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        TMPL_MEM_TYPE const uValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the register and new RSP values. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
                pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;    /* full 64-bit field write; a 32-bit value zero-extends */
            else
                pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;  /* low word only; upper register bits are preserved */
            return VINF_SUCCESS;
        }
    }
    return rc;
}
302
303
/**
 * Pushes an item onto the stack, regular version.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   uValue  The value to push.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPush,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) RT_NOEXCEPT
{
    /* Decrement the stack pointer (calculation only; RSP is committed below on success). */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the value the lazy way. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            return VINF_SUCCESS;
        }
    }

    return rc;
}
340
341
/**
 * Pops a generic item off the stack, regular version.
 *
 * This is used by C-implementation code.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   puValue Where to store the popped value.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStackPop,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue) RT_NOEXCEPT
{
    /* Increment the stack pointer (calculation only; RSP is committed below on success). */
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Read the word the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
                   GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, *puValue));
            pVCpu->cpum.GstCtx.rsp = uNewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}
379
380
/**
 * Pushes an item onto the stack, using a temporary stack pointer.
 *
 * Unlike the regular push, RSP itself is untouched; only *pTmpRsp is updated
 * on success.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   uValue  The value to push.
 * @param   pTmpRsp Pointer to the temporary stack pointer.
 */
VBOXSTRICTRC RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Decrement the stack pointer (working copy only; committed below on success). */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Write the word the lazy way. */
    uint8_t        bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, uValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}
417
418
/**
 * Pops an item off the stack, using a temporary stack pointer.
 *
 * Unlike the regular pop, RSP itself is untouched; only *pTmpRsp is updated
 * on success.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   puValue Where to store the popped value.
 * @param   pTmpRsp Pointer to the temporary stack pointer.
 */
VBOXSTRICTRC
RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Increment the stack pointer (working copy only; committed below on success). */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR   GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Read the word the lazy way. */
    uint8_t              bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);

        /* Commit the new RSP value. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, *puValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}
456
457
458# ifdef IEM_WITH_SETJMP
459
460/**
461 * Safe/fallback stack push function that longjmps on error.
462 */
463void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
464{
465# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
466 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
467# endif
468
469 /* Decrement the stack pointer (prep). */
470 uint64_t uNewRsp;
471 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
472
473 /* Write the data. */
474 uint8_t bUnmapInfo;
475 TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
476 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
477 *puDst = uValue;
478 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
479
480 /* Commit the RSP change. */
481 Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
482 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
483 pVCpu->cpum.GstCtx.rsp = uNewRsp;
484}
485
486
/**
 * Safe/fallback stack pop greg function that longjmps on error.
 *
 * Pops a TMPL_MEM_TYPE off the stack into the given general purpose register,
 * committing RSP only after the read has succeeded.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iGReg   The GREG to load the popped value into.
 */
void RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
# endif

    /* Increment the stack pointer. */
    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Read the data (map/commit longjmp on failure). */
    uint8_t bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
                                                                    GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    TMPL_MEM_TYPE const uValue = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the register and RSP values. */
    Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
    if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
        pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;    /* full 64-bit field write; a 32-bit value zero-extends */
    else
        pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;  /* low word only; upper register bits are preserved */
}
516
517
518# ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack push function for segment register values that longjmps
 * on error.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   uValue  The segment register value to push; only the low 16 bits
 *                  are written (see the notes below), although RSP is adjusted
 *                  by the full sizeof(TMPL_MEM_TYPE).
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    /* The intel docs talks about zero extending the selector register
       value.  My actual intel CPU here might be zero extending the value
       but it still only writes the lower word... */
    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
     * happens when crossing a page boundary, is the high word checked
     * for write accessibility or not?  Probably it is.  What about segment limits?
     * It appears this behavior is also shared with trap error codes.
     *
     * Docs indicate the behavior changed maybe in Pentium or Pentium Pro.  Check
     * ancient hardware when it actually did change. */
    uint8_t bUnmapInfo;
    uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
                                               IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
    *puDst = (uint16_t)uValue; /* only the low word is written, even though RSP moved by sizeof(TMPL_MEM_TYPE) */
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}
554# endif /* TMPL_WITH_PUSH_SREG */
555
556# endif /* IEM_WITH_SETJMP */
557
558#endif /* TMPL_MEM_WITH_STACK */
559
/* clean up */
#undef TMPL_MEM_TYPE
#undef TMPL_MEM_TYPE_ALIGN
#undef TMPL_MEM_FN_SUFF
#undef TMPL_MEM_FMT_TYPE
#undef TMPL_MEM_FMT_DESC
#undef TMPL_WITH_PUSH_SREG
/* NOTE(review): TMPL_MEM_BY_REF and TMPL_MEM_WITH_STACK are not undefined
   here - presumably the includer manages those across instantiations; confirm
   at the include sites before adding them to this list. */
567
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette