VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMInlineMem-x86.h@ 108278

Last change on this file since 108278 was 108278, checked in by vboxsync, 2 months ago

VMM/IEM: Removed the #ifndef IEM_WITH_SETJMP code. We've had IEM_WITH_SETJMP defined unconditionally since 7.0 and the code probably doesn't even compile w/o it, so best remove the unused code. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.0 KB
Line 
1/* $Id: IEMInlineMem-x86.h 108278 2025-02-18 15:46:53Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined Memory Functions, x86 target.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineMem_x86_h
29#define VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineMem_x86_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <iprt/errcore.h>
35
36
37
38
39/** @name Memory access.
40 *
41 * @{
42 */
43
44/**
45 * Checks whether alignment checks are enabled or not.
46 *
47 * @returns true if enabled, false if not.
48 * @param pVCpu The cross context virtual CPU structure of the calling thread.
49 */
50DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
51{
52#if 0
53 AssertCompile(X86_CR0_AM == X86_EFL_AC);
54 return IEM_GET_CPL(pVCpu) == 3
55 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
56#else
57 return RT_BOOL(pVCpu->iem.s.fExec & IEM_F_X86_AC);
58#endif
59}
60
61/**
62 * Checks if the given segment can be written to, raise the appropriate
63 * exception if not.
64 *
65 * @returns VBox strict status code.
66 *
67 * @param pVCpu The cross context virtual CPU structure of the calling thread.
68 * @param pHid Pointer to the hidden register.
69 * @param iSegReg The register number.
70 * @param pu64BaseAddr Where to return the base address to use for the
71 * segment. (In 64-bit code it may differ from the
72 * base in the hidden segment.)
73 */
74DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
75 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
76{
77 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
78
79 if (IEM_IS_64BIT_CODE(pVCpu))
80 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
81 else
82 {
83 if (!pHid->Attr.n.u1Present)
84 {
85 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
86 AssertRelease(uSel == 0);
87 LogEx(LOG_GROUP_IEM,("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
88 return iemRaiseGeneralProtectionFault0(pVCpu);
89 }
90
91 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
92 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
93 && !IEM_IS_64BIT_CODE(pVCpu) )
94 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
95 *pu64BaseAddr = pHid->u64Base;
96 }
97 return VINF_SUCCESS;
98}
99
100
101/**
102 * Checks if the given segment can be read from, raise the appropriate
103 * exception if not.
104 *
105 * @returns VBox strict status code.
106 *
107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
108 * @param pHid Pointer to the hidden register.
109 * @param iSegReg The register number.
110 * @param pu64BaseAddr Where to return the base address to use for the
111 * segment. (In 64-bit code it may differ from the
112 * base in the hidden segment.)
113 */
114DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
115 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
116{
117 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
118
119 if (IEM_IS_64BIT_CODE(pVCpu))
120 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
121 else
122 {
123 if (!pHid->Attr.n.u1Present)
124 {
125 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
126 AssertRelease(uSel == 0);
127 LogEx(LOG_GROUP_IEM,("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
128 return iemRaiseGeneralProtectionFault0(pVCpu);
129 }
130
131 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
132 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
133 *pu64BaseAddr = pHid->u64Base;
134 }
135 return VINF_SUCCESS;
136}
137
138
139
/** @todo slim this down */
/**
 * Applies segmentation to a virtual address for a read access, raising the
 * appropriate exception (\#GP(0), \#SS, invalid-access) via longjmp on failure.
 *
 * @returns The linear address to use for the access.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The segment register index (X86_SREG_XXX), or UINT8_MAX
 *                      for an already-flat address.
 * @param   cbMem       The number of bytes that will be accessed (>= 1).
 * @param   GCPtrMem    The segment-relative address.
 *
 * NOTE(review): iSegReg == UINT8_MAX is handled as "flat" below but would trip
 *               the Assert(iSegReg < X86_SREG_COUNT) in strict builds — confirm
 *               which callers (if any) pass UINT8_MAX.
 */
DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
                                                       size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(cbMem >= 1);
    Assert(iSegReg < X86_SREG_COUNT);

    /*
     * 64-bit mode is simpler.
     */
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        /* Only FS and GS apply a base in long mode; ES/CS/SS/DS are flat. */
        if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
        {
            IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
            PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
            GCPtrMem += pSel->u64Base;
        }

        /* Both the first and the last byte of the access must be canonical. */
        if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
            return GCPtrMem;
        iemRaiseGeneralProtectionFault0Jmp(pVCpu);
    }
    /*
     * 16-bit and 32-bit segmentation.
     */
    else if (iSegReg != UINT8_MAX)
    {
        /** @todo Does this apply to segments with 4G-1 limit? */
        uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
        /* First make sure the access itself doesn't wrap around the 4G boundary. */
        if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
        {
            IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
            PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
            /* Dispatch on the access-relevant attribute bits only; read and
               write bits alias, as do expand-down and conforming (see mask). */
            switch (pSel->Attr.u & (  X86DESCATTR_P     | X86DESCATTR_UNUSABLE
                                    | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
                                    | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF  /* same as down */
                                    | X86_SEL_TYPE_CODE))
            {
                case X86DESCATTR_P:                                         /* readonly data, expand up */
                case X86DESCATTR_P | X86_SEL_TYPE_WRITE:                    /* writable data, expand up */
                case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
                case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
                    /* expand up: the last byte must be within the limit. */
                    if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
                        return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
                    Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
                           (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
                    break;

                case X86DESCATTR_P | X86_SEL_TYPE_DOWN:                     /* readonly data, expand down */
                case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
                    /* expand down: valid range is (limit, upper-bound], where the
                       upper bound is 4G-1 when B=1 and 64K-1 when B=0. */
                    if (RT_LIKELY(   (uint32_t)GCPtrMem > pSel->u32Limit
                                  && (   pSel->Attr.n.u1DefBig
                                      || GCPtrLast32 <= UINT32_C(0xffff)) ))
                        return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
                    Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
                           (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
                    break;

                default:
                    /* Not present, unusable, or an unreadable code segment. */
                    Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
                    iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
                    break;
            }
        }
        Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
        iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
    }
    /*
     * 32-bit flat address.
     */
    else
        return GCPtrMem;
}
216
217
/** @todo slim this down */
/**
 * Applies segmentation to a virtual address for a write access, raising the
 * appropriate exception (\#GP(0), \#SS, invalid-access) via longjmp on failure.
 *
 * @returns The linear address to use for the access.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The segment register index (X86_SREG_XXX).
 * @param   cbMem       The number of bytes that will be accessed (>= 1).
 * @param   GCPtrMem    The segment-relative address.
 *
 * NOTE(review): unlike iemMemApplySegmentToReadJmp, there is no
 *               iSegReg == UINT8_MAX (flat) handling here — presumably callers
 *               never pass UINT8_MAX for writes; confirm.
 */
DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
                                                        RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(cbMem >= 1);
    Assert(iSegReg < X86_SREG_COUNT);

    /*
     * 64-bit mode is simpler.
     */
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        /* Only FS and GS apply a base in long mode; ES/CS/SS/DS are flat. */
        if (iSegReg >= X86_SREG_FS)
        {
            IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
            PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
            GCPtrMem += pSel->u64Base;
        }

        /* Both the first and the last byte of the access must be canonical;
           otherwise we fall through to the #GP(0) at the bottom. */
        if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
            return GCPtrMem;
    }
    /*
     * 16-bit and 32-bit segmentation.
     */
    else
    {
        Assert(GCPtrMem <= UINT32_MAX);
        IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
        PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
        /* Only the bits relevant to write access decisions are considered. */
        uint32_t const fRelevantAttrs = pSel->Attr.u & (  X86DESCATTR_P | X86DESCATTR_UNUSABLE
                                                        | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
        if (   fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE) /* data, expand up */
               /** @todo explore exactly how the CS stuff works in real mode. See also
                *        http://www.rcollins.org/Productivity/DescriptorCache.html and
                *        http://www.rcollins.org/ddj/Aug98/Aug98.html for some insight. */
            || (iSegReg == X86_SREG_CS && IEM_IS_REAL_OR_V86_MODE(pVCpu)) ) /* Ignored for CS. */ /** @todo testcase! */
        {
            /* expand up: last byte within limit, and no 4G wrap-around. */
            uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
            if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
                return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
            iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
        }
        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
        {
            /* expand down - the upper boundary is defined by the B bit, not G.
               NOTE(review): the bounds test uses >= u32Limit while the read
               variant uses > u32Limit — confirm which is intended. */
            uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
            if (RT_LIKELY(   (uint32_t)GCPtrMem >= pSel->u32Limit
                          && (pSel->Attr.n.u1DefBig || GCPtrLast32 <= UINT32_C(0xffff))
                          && GCPtrLast32 >= (uint32_t)GCPtrMem))
                return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
            iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
        }
        else
            /* Not present, unusable, code, or read-only data segment. */
            iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
    }
    /* Non-canonical 64-bit address. */
    iemRaiseGeneralProtectionFault0Jmp(pVCpu);
}
278
279
280/**
281 * Fakes a long mode stack selector for SS = 0.
282 *
283 * @param pDescSs Where to return the fake stack descriptor.
284 * @param uDpl The DPL we want.
285 */
286DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
287{
288 pDescSs->Long.au64[0] = 0;
289 pDescSs->Long.au64[1] = 0;
290 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
291 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
292 pDescSs->Long.Gen.u2Dpl = uDpl;
293 pDescSs->Long.Gen.u1Present = 1;
294 pDescSs->Long.Gen.u1Long = 1;
295}
296
297
/*
 * Instantiate R/W inline templates.
 *
 * Each group below configures the TMPL_MEM_XXX macros and then includes the
 * template, which generates the inline fetch/store/map helpers for that type
 * (and consumes/undefines the per-type macros again).
 */

/** @def TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
 * Used to check if an unaligned access is within the page and won't
 * trigger an \#AC.
 *
 * This can also be used to deal with misaligned accesses on platforms that are
 * sensitive to such if desired.
 */
#if 1
# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) \
    (   ((a_GCPtrEff) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(a_TmplMemType) \
     && !((a_pVCpu)->iem.s.fExec & IEM_F_X86_AC) )
#else
# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0
#endif

#define TMPL_MEM_WITH_ATOMIC_MAPPING

/* Byte accessors (no alignment requirement). */
#define TMPL_MEM_TYPE uint8_t
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_TYPE_SIZE 1
#define TMPL_MEM_FN_SUFF U8
#define TMPL_MEM_FMT_TYPE "%#04x"
#define TMPL_MEM_FMT_DESC "byte"
#include "IEMAllMemRWTmplInline-x86.cpp.h"

/* Stack helpers (push/pop) are generated for word/dword/qword from here on. */
#define TMPL_MEM_WITH_STACK

/* Word accessors. */
#define TMPL_MEM_TYPE uint16_t
#define TMPL_MEM_TYPE_ALIGN 1
#define TMPL_MEM_TYPE_SIZE 2
#define TMPL_MEM_FN_SUFF U16
#define TMPL_MEM_FMT_TYPE "%#06x"
#define TMPL_MEM_FMT_DESC "word"
#include "IEMAllMemRWTmplInline-x86.cpp.h"

/* Dword accessors, incl. the push-sreg variant (e.g. PUSH FS in 32-bit code). */
#define TMPL_WITH_PUSH_SREG
#define TMPL_MEM_TYPE uint32_t
#define TMPL_MEM_TYPE_ALIGN 3
#define TMPL_MEM_TYPE_SIZE 4
#define TMPL_MEM_FN_SUFF U32
#define TMPL_MEM_FMT_TYPE "%#010x"
#define TMPL_MEM_FMT_DESC "dword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"
#undef TMPL_WITH_PUSH_SREG

/* Qword accessors. */
#define TMPL_MEM_TYPE uint64_t
#define TMPL_MEM_TYPE_ALIGN 7
#define TMPL_MEM_TYPE_SIZE 8
#define TMPL_MEM_FN_SUFF U64
#define TMPL_MEM_FMT_TYPE "%#018RX64"
#define TMPL_MEM_FMT_DESC "qword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"

#undef TMPL_MEM_WITH_STACK
#undef TMPL_MEM_WITH_ATOMIC_MAPPING

#define TMPL_MEM_NO_MAPPING /* currently sticky */

/* Fetch-only dword variant that skips #AC checking (NoAc). */
#define TMPL_MEM_NO_STORE
#define TMPL_MEM_TYPE uint32_t
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_TYPE_SIZE 4
#define TMPL_MEM_FN_SUFF U32NoAc
#define TMPL_MEM_FMT_TYPE "%#010x"
#define TMPL_MEM_FMT_DESC "dword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"

/* Fetch-only qword variant that skips #AC checking (NoAc). */
#define TMPL_MEM_NO_STORE
#define TMPL_MEM_TYPE uint64_t
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_TYPE_SIZE 8
#define TMPL_MEM_FN_SUFF U64NoAc
#define TMPL_MEM_FMT_TYPE "%#018RX64"
#define TMPL_MEM_FMT_DESC "qword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"

/* Fetch-only qword variant with 16-byte alignment (e.g. low half of a
   dqword-aligned 128-bit operand). */
#define TMPL_MEM_NO_STORE
#define TMPL_MEM_TYPE uint64_t
#define TMPL_MEM_TYPE_ALIGN 15
#define TMPL_MEM_TYPE_SIZE 8
#define TMPL_MEM_FN_SUFF U64AlignedU128
#define TMPL_MEM_FMT_TYPE "%#018RX64"
#define TMPL_MEM_FMT_DESC "qword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"

#undef TMPL_MEM_NO_MAPPING

/* 80-bit floating point (FPU tword). */
#define TMPL_MEM_TYPE RTFLOAT80U
#define TMPL_MEM_TYPE_ALIGN 7
#define TMPL_MEM_TYPE_SIZE 10
#define TMPL_MEM_FN_SUFF R80
#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
#define TMPL_MEM_FMT_DESC "tword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"

/* 80-bit packed BCD (FBLD/FBSTP operand). */
#define TMPL_MEM_TYPE RTPBCD80U
#define TMPL_MEM_TYPE_ALIGN 7 /** @todo RTPBCD80U alignment testcase */
#define TMPL_MEM_TYPE_SIZE 10
#define TMPL_MEM_FN_SUFF D80
#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
#define TMPL_MEM_FMT_DESC "tword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"

/* 128-bit (dqword) accessors, with atomic mapping support (CMPXCHG16B & co). */
#define TMPL_MEM_WITH_ATOMIC_MAPPING
#define TMPL_MEM_TYPE RTUINT128U
#define TMPL_MEM_TYPE_ALIGN 15
#define TMPL_MEM_TYPE_SIZE 16
#define TMPL_MEM_FN_SUFF U128
#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
#define TMPL_MEM_FMT_DESC "dqword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"
#undef TMPL_MEM_WITH_ATOMIC_MAPPING

/* Unaligned 128-bit fetch/store without #AC checking (NoAc). */
#define TMPL_MEM_NO_MAPPING
#define TMPL_MEM_TYPE RTUINT128U
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_TYPE_SIZE 16
#define TMPL_MEM_FN_SUFF U128NoAc
#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
#define TMPL_MEM_FMT_DESC "dqword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"
#undef TMPL_MEM_NO_MAPPING


/* Every template relying on unaligned accesses inside a page not being okay should go below. */
#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
#define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0

/* SSE: strictly 16-byte aligned 128-bit accesses (#GP on misalignment). */
#define TMPL_MEM_NO_MAPPING
#define TMPL_MEM_TYPE RTUINT128U
#define TMPL_MEM_TYPE_ALIGN 15
#define TMPL_MEM_TYPE_SIZE 16
#define TMPL_MEM_FN_SUFF U128AlignedSse
#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
#define TMPL_MEM_FMT_DESC "dqword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"
#undef TMPL_MEM_NO_MAPPING

/* AVX: unaligned 256-bit accesses (VMOVDQU & co). */
#define TMPL_MEM_NO_MAPPING
#define TMPL_MEM_TYPE RTUINT256U
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_TYPE_SIZE 32
#define TMPL_MEM_FN_SUFF U256NoAc
#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
#define TMPL_MEM_FMT_DESC "qqword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"
#undef TMPL_MEM_NO_MAPPING

/* AVX: strictly 32-byte aligned 256-bit accesses (VMOVDQA & co). */
#define TMPL_MEM_NO_MAPPING
#define TMPL_MEM_TYPE RTUINT256U
#define TMPL_MEM_TYPE_ALIGN 31
#define TMPL_MEM_TYPE_SIZE 32
#define TMPL_MEM_FN_SUFF U256AlignedAvx
#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
#define TMPL_MEM_FMT_DESC "qqword"
#include "IEMAllMemRWTmplInline-x86.cpp.h"
#undef TMPL_MEM_NO_MAPPING

#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK

/** @} */
463
464#endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineMem_x86_h */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette