VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp @ 108244

Last change on this file: r108244, checked in by vboxsync, 5 weeks ago

VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531

1/* $Id: IEMAllOpcodeFetch-x86.cpp 108244 2025-02-16 22:45:02Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#ifdef IN_RING0
35# define VBOX_VMM_TARGET_X86
36#endif
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/pgm.h>
40#include <VBox/vmm/dbgf.h>
41#include "IEMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/log.h>
44#include <VBox/err.h>
45#include <VBox/param.h>
46#include <iprt/assert.h>
47#include <iprt/string.h>
48#include <iprt/x86.h>
49
50#include "IEMInline.h"
51#ifdef VBOX_VMM_TARGET_X86
52# include "IEMAllTlbInline-x86.h"
53#endif
54
55
56#ifndef IEM_WITH_CODE_TLB
57/**
58 * Prefetch opcodes the first time when starting execution.
59 *
60 * @returns Strict VBox status code.
61 * @param pVCpu The cross context virtual CPU structure of the calling
62 * thread.
63 */
64VBOXSTRICTRC iemOpcodeFetchPrefetch(PVMCPUCC pVCpu) RT_NOEXCEPT
65{
66 /*
67 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
68 *
69 * First translate CS:rIP to a physical address.
70 *
71 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
72 * all relevant bytes from the first page, as it ASSUMES it's only ever
73 * called for dealing with CS.LIM, page crossing and instructions that
74 * are too long.
75 */
76 uint32_t cbToTryRead;
77 RTGCPTR GCPtrPC;
78 if (IEM_IS_64BIT_CODE(pVCpu))
79 {
80 cbToTryRead = GUEST_PAGE_SIZE;
81 GCPtrPC = pVCpu->cpum.GstCtx.rip;
82 if (IEM_IS_CANONICAL(GCPtrPC))
83 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
84 else
85 return iemRaiseGeneralProtectionFault0(pVCpu);
86 }
87 else
88 {
89 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
90 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
91 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
92 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
93 else
94 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
95 if (cbToTryRead) { /* likely */ }
96 else /* overflowed */
97 {
98 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
99 cbToTryRead = UINT32_MAX;
100 }
101 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
102 Assert(GCPtrPC <= UINT32_MAX);
103 }
104
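   /* At this point GCPtrPC holds the flat (linear) address of the instruction and
      cbToTryRead is an initial upper bound (segment limit, or page size in 64-bit
      mode); it gets clipped to the page boundary and the opcode buffer size below. */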
105 PGMPTWALKFAST WalkFast;
106 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
107 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
108 &WalkFast);
109 if (RT_SUCCESS(rc))
110 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
111 else
112 {
113 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
114# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
115/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
116 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
117 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
118 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
119# endif
120 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
121 }
122#if 0
123 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
124 else
125 {
126 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
127# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
128/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
129# error completely wrong
130 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
131 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
132# endif
133 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
134 }
135 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
136 else
137 {
138 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
139# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
140/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
141# error completely wrong.
142 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
143 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
144# endif
145 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
146 }
147#else
148 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
149 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
150#endif
151 RTGCPHYS const GCPhys = WalkFast.GCPhys;
152
153 /*
154 * Read the bytes at this address.
155 */
156 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
157 if (cbToTryRead > cbLeftOnPage)
158 cbToTryRead = cbLeftOnPage;
159 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
160 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
161
162 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
163 {
164 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
165 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
166 { /* likely */ }
167 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
168 {
169 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
170 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
171 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
172 }
173 else
174 {
175 Log((RT_SUCCESS(rcStrict)
176 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
177 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
178 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
179 return rcStrict;
180 }
181 }
182 else
183 {
184 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
185 if (RT_SUCCESS(rc))
186 { /* likely */ }
187 else
188 {
189 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
190 GCPtrPC, GCPhys, cbToTryRead, rc));
191 return rc;
192 }
193 }
194 pVCpu->iem.s.cbOpcode = cbToTryRead;
195 return VINF_SUCCESS;
196}
197#endif /* !IEM_WITH_CODE_TLB */
198
199
200/**
201 * Flushes the prefetch buffer, light version.
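 * (With the code TLB this is a no-op; without it the opcode buffer is trimmed
 *  down to the length of the current instruction.)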
202 */
203void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
204{
205#ifndef IEM_WITH_CODE_TLB
206 pVCpu->iem.s.cbOpcode = cbInstr;
207#else
208 RT_NOREF(pVCpu, cbInstr);
209#endif
210}
211
212
213/**
214 * Flushes the prefetch buffer, heavy version.
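 * (With the code TLB this forces a buffer reload by zeroing cbInstrBufTotal;
 *  without it the opcode buffer is trimmed as in the light version.)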
215 */
216void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
217{
218#ifndef IEM_WITH_CODE_TLB
219 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
220#elif 1
221 pVCpu->iem.s.cbInstrBufTotal = 0;
222 RT_NOREF(cbInstr);
223#else
224 RT_NOREF(pVCpu, cbInstr);
225#endif
226}
227
228
229
230#ifdef IEM_WITH_CODE_TLB
231
232/**
233 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
234 * and longjmp'ing on failure.
235 *
236 * We end up here for a number of reasons:
237 * - pbInstrBuf isn't yet initialized.
238 * - Advancing beyond the buffer boundary (e.g. crossing a page).
239 * - Advancing beyond the CS segment limit.
240 * - Fetching from non-mappable page (e.g. MMIO).
241 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
242 *
243 * @param pVCpu The cross context virtual CPU structure of the
244 * calling thread.
245 * @param pvDst Where to return the bytes.
246 * @param cbDst Number of bytes to read. A value of zero is
247 * allowed for initializing pbInstrBuf (the
248 * recompiler does this). In this case it is best
249 * to set pbInstrBuf to NULL prior to the call.
250 */
251void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
252{
253# ifdef IN_RING3
254 for (;;)
255 {
256 Assert(cbDst <= 8);
257 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
258
259 /*
260 * We might have a partial buffer match, deal with that first to make the
261 * rest simpler. This is the first part of the cross page/buffer case.
262 */
263 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
264 if (pbInstrBuf != NULL)
265 {
266 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
267 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
268 if (offBuf < cbInstrBuf)
269 {
270 Assert(offBuf + cbDst > cbInstrBuf);
271 uint32_t const cbCopy = cbInstrBuf - offBuf;
272 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
273
274 cbDst -= cbCopy;
275 pvDst = (uint8_t *)pvDst + cbCopy;
276 offBuf += cbCopy;
277 }
278 }
279
280 /*
281 * Check segment limit, figuring how much we're allowed to access at this point.
282 *
283 * We will fault immediately if RIP is past the segment limit / in non-canonical
284 * territory. If we do continue, there are one or more bytes to read before we
285 * end up in trouble and we need to do that first before faulting.
286 */
287 RTGCPTR GCPtrFirst;
288 uint32_t cbMaxRead;
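   /* Note: offCurInstrStart is the offset of the current instruction within the
      instruction buffer (it can go negative when an instruction crosses into a new
      buffer), so offBuf - offCurInstrStart is the distance of the requested byte
      from the instruction start, i.e. from RIP. */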
289 if (IEM_IS_64BIT_CODE(pVCpu))
290 {
291 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
292 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
293 { /* likely */ }
294 else
295 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
296 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
297 }
298 else
299 {
300 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
301 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
302 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
303 { /* likely */ }
304 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
305 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
306 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
307 if (cbMaxRead != 0)
308 { /* likely */ }
309 else
310 {
311 /* Overflowed because address is 0 and limit is max. */
312 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
313 cbMaxRead = X86_PAGE_SIZE;
314 }
315 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
316 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
317 if (cbMaxRead2 < cbMaxRead)
318 cbMaxRead = cbMaxRead2;
319 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
320 }
321
322 /*
323 * Get the TLB entry for this piece of code.
324 */
325 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
326 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
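   /* The code TLB uses entry pairs: the even entry holds non-global translations
      (tagged with uTlbRevision), the odd one holds global translations (tagged
      with uTlbRevisionGlobal).  The miss path below picks the even or odd slot
      based on the PTE.G bit accordingly. */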
327 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
328 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
329 {
330 /* likely when executing lots of code, otherwise unlikely */
331# ifdef IEM_WITH_TLB_STATISTICS
332 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
333# endif
334 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
335
336 /* Check TLB page table level access flags. */
337 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
338 {
339 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
340 {
341 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
342 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
343 }
344 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
345 {
346 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
347 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
348 }
349 }
350
351 /* Look up the physical page info if necessary. */
352 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
353 { /* not necessary */ }
354 else
355 {
356 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
357 { /* likely */ }
358 else
359 iemTlbInvalidateAllPhysicalSlow(pVCpu);
360 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
361 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
362 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
363 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
364 }
365 }
366 else
367 {
368 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
369
370 /* This page table walking will set A bits as required by the access while performing the walk.
371 ASSUMES these are set when the address is translated rather than on commit... */
372 /** @todo testcase: check when A bits are actually set by the CPU for code. */
373 PGMPTWALKFAST WalkFast;
374 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
375 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
376 &WalkFast);
377 if (RT_SUCCESS(rc))
378 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
379 else
380 {
381# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
382 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
383 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
384# endif
385 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
386 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
387 }
388
389 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
390 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
391 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
392 {
393 pTlbe--;
394 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
395 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
396 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
397# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
398 else
399 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
400# endif
401 }
402 else
403 {
404 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
405 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
406 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
407 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
408# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
409 else
410 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
411# endif
412 }
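   /* Convert the walk result into TLB flags: the page-table 'allow' bits
      (US/RW/D/A) are inverted into IEMTLBE_F_PT_NO_* flags, and the NX bit is
      shifted down to bit 0 to form IEMTLBE_F_PT_NO_EXEC (see the AssertCompile
      above). */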
413 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
414 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
415 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
416 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
417 pTlbe->GCPhys = GCPhysPg;
418 pTlbe->pbMappingR3 = NULL;
419 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
420 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
421 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
422
423 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
424 IEMTLBTRACE_LOAD( pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
425 else
426 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
427
428 /* Resolve the physical address. */
429 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
430 { /* likely */ }
431 else
432 iemTlbInvalidateAllPhysicalSlow(pVCpu);
433 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
434 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
435 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
436 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
437 }
438
439# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
440 /*
441 * Try to do a direct read using the pbMappingR3 pointer.
442 * Note! Do not recheck the physical TLB revision number here as we have the
443 * wrong response to changes in the else case. If someone is updating
444 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
445 * pretending we always won the race.
446 */
447 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
448 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
449 {
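   /* cbInstrBufTotal is how far the mapping is readable on this page, while
      cbInstrBuf is additionally clipped to the 15 byte maximum instruction
      length measured from the start of the current instruction. */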
450 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
451 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
452 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
453 {
454 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
455 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
456 }
457 else
458 {
459 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
460 if (cbInstr + (uint32_t)cbDst <= 15)
461 {
462 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
463 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
464 }
465 else
466 {
467 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
468 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
469 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
470 }
471 }
472 if (cbDst <= cbMaxRead)
473 {
474 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
475# if 0 /* unused */
476 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
477# endif
478 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
479 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
480 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
481 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
482 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
483 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
484 else
485 Assert(!pvDst);
486 return;
487 }
488 pVCpu->iem.s.pbInstrBuf = NULL;
489
490 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
491 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
492 }
493# else
494# error "refactor as needed"
495 /*
496 * If there is no special read handling, we can read a bit more and
497 * put it in the prefetch buffer.
498 */
499 if ( cbDst < cbMaxRead
500 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
501 {
502 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
503 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
504 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
505 { /* likely */ }
506 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
507 {
508 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
509 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
510 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
511 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
512 }
513 else
514 {
515 Log((RT_SUCCESS(rcStrict)
516 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
517 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
518 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
519 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
520 }
521 }
522# endif
523 /*
524 * Special read handling, so only read exactly what's needed.
525 * This is a highly unlikely scenario.
526 */
527 else
528 {
529 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
530
531 /* Check instruction length. */
532 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
533 if (RT_LIKELY(cbInstr + cbDst <= 15))
534 { /* likely */ }
535 else
536 {
537 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
538 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
539 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
540 }
541
542 /* Do the reading. */
543 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
544 if (cbToRead > 0)
545 {
546 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
547 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
548 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
549 { /* likely */ }
550 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
551 {
552 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
553 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
554 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
555 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
556 }
557 else
558 {
559 Log((RT_SUCCESS(rcStrict)
560 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
561 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
562 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
563 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
564 }
565 }
566
567 /* Update the state and probably return. */
568 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
569 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
570# if 0 /* unused */
571 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
572# endif
573 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
574 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
575 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
576 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
577 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
578 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
579 pVCpu->iem.s.pbInstrBuf = NULL;
580 if (cbToRead == cbDst)
581 return;
582 Assert(cbToRead == cbMaxRead);
583 }
584
585 /*
586 * More to read, loop.
587 */
588 cbDst -= cbMaxRead;
589 pvDst = (uint8_t *)pvDst + cbMaxRead;
590 }
591# else /* !IN_RING3 */
592 RT_NOREF(pvDst, cbDst);
593 if (pvDst || cbDst)
594 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
595# endif /* !IN_RING3 */
596}
597
598#else /* !IEM_WITH_CODE_TLB */
599
600/**
601 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
602 * exception if it fails.
603 *
604 * @returns Strict VBox status code.
605 * @param pVCpu The cross context virtual CPU structure of the
606 * calling thread.
607 * @param cbMin The minimum number of bytes relative to offOpcode that
608 * must be read.
609 */
610VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
611{
612 /*
613 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
614 *
615 * First translate CS:rIP to a physical address.
616 */
617 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
618 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
619 uint8_t const cbLeft = cbOpcode - offOpcode;
620 Assert(cbLeft < cbMin);
621 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
622
623 uint32_t cbToTryRead;
624 RTGCPTR GCPtrNext;
625 if (IEM_IS_64BIT_CODE(pVCpu))
626 {
627 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
628 if (!IEM_IS_CANONICAL(GCPtrNext))
629 return iemRaiseGeneralProtectionFault0(pVCpu);
630 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
631 }
632 else
633 {
634 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
635 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
636 GCPtrNext32 += cbOpcode;
637 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
638 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
639 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
640 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
641 if (!cbToTryRead) /* overflowed */
642 {
643 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
644 cbToTryRead = UINT32_MAX;
645 /** @todo check out wrapping around the code segment. */
646 }
647 if (cbToTryRead < cbMin - cbLeft)
648 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
649 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
650
651 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
652 if (cbToTryRead > cbLeftOnPage)
653 cbToTryRead = cbLeftOnPage;
654 }
655
656 /* Restrict to opcode buffer space.
657
658 We're making ASSUMPTIONS here based on work done previously in
659 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
660 be fetched in case of an instruction crossing two pages. */
661 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
662 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
663 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
664 { /* likely */ }
665 else
666 {
667 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
668 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
669 return iemRaiseGeneralProtectionFault0(pVCpu);
670 }
671
672 PGMPTWALKFAST WalkFast;
673 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
674 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
675 &WalkFast);
676 if (RT_SUCCESS(rc))
677 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
678 else
679 {
680 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
681#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
682 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
683 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
684#endif
685 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
686 }
687 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
688 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
689
690 RTGCPHYS const GCPhys = WalkFast.GCPhys;
691 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
692
693 /*
694 * Read the bytes at this address.
695 *
696 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
697 * and since PATM should only patch the start of an instruction there
698 * should be no need to check again here.
699 */
700 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
701 {
702 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
703 cbToTryRead, PGMACCESSORIGIN_IEM);
704 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
705 { /* likely */ }
706 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
707 {
708 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
709 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
710 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
711 }
712 else
713 {
714 Log((RT_SUCCESS(rcStrict)
715 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
716 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
717 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
718 return rcStrict;
719 }
720 }
721 else
722 {
723 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
724 if (RT_SUCCESS(rc))
725 { /* likely */ }
726 else
727 {
728 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
729 return rc;
730 }
731 }
732 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
733 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
734
735 return VINF_SUCCESS;
736}
737
738#endif /* !IEM_WITH_CODE_TLB */
739#ifndef IEM_WITH_SETJMP
740
741/**
742 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
743 *
744 * @returns Strict VBox status code.
745 * @param pVCpu The cross context virtual CPU structure of the
746 * calling thread.
747 * @param pb Where to return the opcode byte.
748 */
749VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
750{
751 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
752 if (rcStrict == VINF_SUCCESS)
753 {
754 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
755 *pb = pVCpu->iem.s.abOpcode[offOpcode];
756 pVCpu->iem.s.offOpcode = offOpcode + 1;
757 }
758 else
759 *pb = 0;
760 return rcStrict;
761}
762
763#else /* IEM_WITH_SETJMP */
764
765/**
766 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
767 *
768 * @returns The opcode byte.
769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
770 */
771uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
772{
773# ifdef IEM_WITH_CODE_TLB
774 uint8_t u8;
775 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
776 return u8;
777# else
778 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
779 if (rcStrict == VINF_SUCCESS)
780 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
781 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
782# endif
783}
784
785#endif /* IEM_WITH_SETJMP */
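/* Callers normally go through inline fast paths elsewhere in IEM and only land in
   these Slow/SlowJmp workers when the current opcode buffer cannot satisfy the
   read.  A rough, illustrative sketch of that caller pattern (not the actual
   inline implementation):

       uint8_t b;
       if (RT_LIKELY(pVCpu->iem.s.offOpcode < pVCpu->iem.s.cbOpcode))
           b = pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
       else
           b = iemOpcodeGetNextU8SlowJmp(pVCpu); // refills the buffer, may longjmp
 */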
786
787#ifndef IEM_WITH_SETJMP
788
789/**
790 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
791 *
792 * @returns Strict VBox status code.
793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
794 * @param pu16 Where to return the opcode word (sign-extended byte).
795 */
796VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
797{
798 uint8_t u8;
799 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
800 if (rcStrict == VINF_SUCCESS)
801 *pu16 = (int8_t)u8;
802 return rcStrict;
803}
804
805
806/**
807 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
808 *
809 * @returns Strict VBox status code.
810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
811 * @param pu32 Where to return the opcode dword.
812 */
813VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
814{
815 uint8_t u8;
816 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
817 if (rcStrict == VINF_SUCCESS)
818 *pu32 = (int8_t)u8;
819 return rcStrict;
820}
821
822
823/**
824 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
825 *
826 * @returns Strict VBox status code.
827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
828 * @param pu64 Where to return the opcode qword.
829 */
830VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
831{
832 uint8_t u8;
833 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
834 if (rcStrict == VINF_SUCCESS)
835 *pu64 = (int8_t)u8;
836 return rcStrict;
837}
838
839#endif /* !IEM_WITH_SETJMP */
840
841
842#ifndef IEM_WITH_SETJMP
843
844/**
845 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
846 *
847 * @returns Strict VBox status code.
848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
849 * @param pu16 Where to return the opcode word.
850 */
851VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
852{
853 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
854 if (rcStrict == VINF_SUCCESS)
855 {
856 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
857# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
858 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
859# else
860 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
861# endif
862 pVCpu->iem.s.offOpcode = offOpcode + 2;
863 }
864 else
865 *pu16 = 0;
866 return rcStrict;
867}
868
869#else /* IEM_WITH_SETJMP */
870
871/**
872 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
873 *
874 * @returns The opcode word.
875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
876 */
877uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
878{
879# ifdef IEM_WITH_CODE_TLB
880 uint16_t u16;
881 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
882 return u16;
883# else
884 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
885 if (rcStrict == VINF_SUCCESS)
886 {
887 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
888 pVCpu->iem.s.offOpcode += 2;
889# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
890 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
891# else
892 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
893# endif
894 }
895 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
896# endif
897}
898
899#endif /* IEM_WITH_SETJMP */
900
901#ifndef IEM_WITH_SETJMP
902
903/**
904 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
905 *
906 * @returns Strict VBox status code.
907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
908 * @param pu32 Where to return the opcode double word.
909 */
910VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
911{
912 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
913 if (rcStrict == VINF_SUCCESS)
914 {
915 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
916 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
917 pVCpu->iem.s.offOpcode = offOpcode + 2;
918 }
919 else
920 *pu32 = 0;
921 return rcStrict;
922}
923
924
925/**
926 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
927 *
928 * @returns Strict VBox status code.
929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
930 * @param pu64 Where to return the opcode quad word.
931 */
932VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
933{
934 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
935 if (rcStrict == VINF_SUCCESS)
936 {
937 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
938 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
939 pVCpu->iem.s.offOpcode = offOpcode + 2;
940 }
941 else
942 *pu64 = 0;
943 return rcStrict;
944}
945
946#endif /* !IEM_WITH_SETJMP */
947
948#ifndef IEM_WITH_SETJMP
949
950/**
951 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
952 *
953 * @returns Strict VBox status code.
954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
955 * @param pu32 Where to return the opcode dword.
956 */
957VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
958{
959 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
960 if (rcStrict == VINF_SUCCESS)
961 {
962 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
963# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
964 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
965# else
966 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
967 pVCpu->iem.s.abOpcode[offOpcode + 1],
968 pVCpu->iem.s.abOpcode[offOpcode + 2],
969 pVCpu->iem.s.abOpcode[offOpcode + 3]);
970# endif
971 pVCpu->iem.s.offOpcode = offOpcode + 4;
972 }
973 else
974 *pu32 = 0;
975 return rcStrict;
976}
977
978#else /* IEM_WITH_SETJMP */
979
980/**
981 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
982 *
983 * @returns The opcode dword.
984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
985 */
986uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
987{
988# ifdef IEM_WITH_CODE_TLB
989 uint32_t u32;
990 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
991 return u32;
992# else
993 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
994 if (rcStrict == VINF_SUCCESS)
995 {
996 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
997 pVCpu->iem.s.offOpcode = offOpcode + 4;
998# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
999 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1000# else
1001 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1002 pVCpu->iem.s.abOpcode[offOpcode + 1],
1003 pVCpu->iem.s.abOpcode[offOpcode + 2],
1004 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1005# endif
1006 }
1007 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1008# endif
1009}
1010
1011#endif /* IEM_WITH_SETJMP */
1012
1013#ifndef IEM_WITH_SETJMP
1014
1015/**
1016 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1017 *
1018 * @returns Strict VBox status code.
1019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1020 * @param pu64 Where to return the opcode qword (zero-extended dword).
1021 */
1022VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1023{
1024 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1025 if (rcStrict == VINF_SUCCESS)
1026 {
1027 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1028 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1029 pVCpu->iem.s.abOpcode[offOpcode + 1],
1030 pVCpu->iem.s.abOpcode[offOpcode + 2],
1031 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1032 pVCpu->iem.s.offOpcode = offOpcode + 4;
1033 }
1034 else
1035 *pu64 = 0;
1036 return rcStrict;
1037}
1038
1039
1040/**
1041 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1042 *
1043 * @returns Strict VBox status code.
1044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1045 * @param pu64 Where to return the opcode qword.
1046 */
1047VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1048{
1049 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1050 if (rcStrict == VINF_SUCCESS)
1051 {
1052 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1053 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1054 pVCpu->iem.s.abOpcode[offOpcode + 1],
1055 pVCpu->iem.s.abOpcode[offOpcode + 2],
1056 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1057 pVCpu->iem.s.offOpcode = offOpcode + 4;
1058 }
1059 else
1060 *pu64 = 0;
1061 return rcStrict;
1062}
1063
1064#endif /* !IEM_WITH_SETJMP */
1065
1066#ifndef IEM_WITH_SETJMP
1067
1068/**
1069 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1070 *
1071 * @returns Strict VBox status code.
1072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1073 * @param pu64 Where to return the opcode qword.
1074 */
1075VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1076{
1077 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1078 if (rcStrict == VINF_SUCCESS)
1079 {
1080 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1081# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1082 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1083# else
1084 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1085 pVCpu->iem.s.abOpcode[offOpcode + 1],
1086 pVCpu->iem.s.abOpcode[offOpcode + 2],
1087 pVCpu->iem.s.abOpcode[offOpcode + 3],
1088 pVCpu->iem.s.abOpcode[offOpcode + 4],
1089 pVCpu->iem.s.abOpcode[offOpcode + 5],
1090 pVCpu->iem.s.abOpcode[offOpcode + 6],
1091 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1092# endif
1093 pVCpu->iem.s.offOpcode = offOpcode + 8;
1094 }
1095 else
1096 *pu64 = 0;
1097 return rcStrict;
1098}
1099
1100#else /* IEM_WITH_SETJMP */
1101
1102/**
1103 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1104 *
1105 * @returns The opcode qword.
1106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1107 */
1108uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1109{
1110# ifdef IEM_WITH_CODE_TLB
1111 uint64_t u64;
1112 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1113 return u64;
1114# else
1115 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1116 if (rcStrict == VINF_SUCCESS)
1117 {
1118 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1119 pVCpu->iem.s.offOpcode = offOpcode + 8;
1120# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1121 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1122# else
1123 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1124 pVCpu->iem.s.abOpcode[offOpcode + 1],
1125 pVCpu->iem.s.abOpcode[offOpcode + 2],
1126 pVCpu->iem.s.abOpcode[offOpcode + 3],
1127 pVCpu->iem.s.abOpcode[offOpcode + 4],
1128 pVCpu->iem.s.abOpcode[offOpcode + 5],
1129 pVCpu->iem.s.abOpcode[offOpcode + 6],
1130 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1131# endif
1132 }
1133 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1134# endif
1135}
1136
1137#endif /* IEM_WITH_SETJMP */
1138