VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThreadedRecompiler.cpp@ 100050

Last change on this file since 100050 was 100020, checked in by vboxsync, 23 months ago

VMM/IEM: Draft for execution mode flags and translation block flags. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.5 KB
Line 
1/* $Id: IEMAllInstructionsThreadedRecompiler.cpp 100020 2023-05-31 01:09:06Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
33# define LOG_GROUP LOG_GROUP_IEM
34#endif
35#define VMCPU_INCL_CPUM_GST_CTX
36#include <VBox/vmm/iem.h>
37#include <VBox/vmm/cpum.h>
38#include <VBox/vmm/apic.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/pgm.h>
41#include <VBox/vmm/iom.h>
42#include <VBox/vmm/em.h>
43#include <VBox/vmm/hm.h>
44#include <VBox/vmm/nem.h>
45#include <VBox/vmm/gim.h>
46#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
47# include <VBox/vmm/em.h>
48# include <VBox/vmm/hm_svm.h>
49#endif
50#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
51# include <VBox/vmm/hmvmxinline.h>
52#endif
53#include <VBox/vmm/tm.h>
54#include <VBox/vmm/dbgf.h>
55#include <VBox/vmm/dbgftrace.h>
56#ifndef TST_IEM_CHECK_MC
57# include "IEMInternal.h"
58#endif
59#include <VBox/vmm/vmcc.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <VBox/dis.h>
64#include <VBox/disopcode-x86-amd64.h>
65#include <iprt/asm-math.h>
66#include <iprt/assert.h>
67#include <iprt/string.h>
68#include <iprt/x86.h>
69
70#ifndef TST_IEM_CHECK_MC
71# include "IEMInline.h"
72# include "IEMOpHlp.h"
73# include "IEMMc.h"
74#endif
75
76#include "IEMThreadedFunctions.h"
77
78
/*
 * Narrow down configs here to avoid wasting time on unused configs.
 */
82
83#ifndef IEM_WITH_CODE_TLB
84# error The code TLB must be enabled for the recompiler.
85#endif
86
87#ifndef IEM_WITH_DATA_TLB
88# error The data TLB must be enabled for the recompiler.
89#endif
90
91#ifndef IEM_WITH_SETJMP
92# error The setjmp approach must be enabled for the recompiler.
93#endif
94
95
96/*********************************************************************************************************************************
97* Structures and Typedefs *
98*********************************************************************************************************************************/
/**
 * A call for the threaded call table.
 *
 * One entry per decoded instruction (or instruction fragment) in a
 * translation block; the dispatcher invokes enmFunction with auParams.
 */
typedef struct IEMTHRDEDCALLENTRY
{
    /** The function to call (IEMTHREADEDFUNCS). */
    uint16_t enmFunction;
    /** Explicit padding; keeps the entry a whole number of uint64_t's. */
    uint16_t uUnused0;

    /** The opcode length. */
    uint8_t cbOpcode;
    /** The opcode chunk number.
     * @note sketches for discontiguous opcode support */
    uint8_t idxOpcodeChunk;
    /** The offset into the opcode chunk of this function.
     * @note sketches for discontiguous opcode support */
    uint16_t offOpcodeChunk;

    /** Generic parameters. */
    uint64_t auParams[3];
} IEMTHRDEDCALLENTRY;
AssertCompileSize(IEMTHRDEDCALLENTRY, sizeof(uint64_t) * 4);
/** Pointer to a threaded call entry. */
typedef IEMTHRDEDCALLENTRY *PIEMTHRDEDCALLENTRY;
/** Pointer to a const threaded call entry. */
typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;
125
/** @name IEM_F_XXX - Execution mode flags (IEMCPU::fExec, IEMTB::fFlags).
 *
 * These flags are set when entering IEM and adjusted as code is executed, such
 * that they will always contain the current values as instructions are
 * finished.
 *
 * In recompiled execution mode, (most of) these flags are included in the
 * translation block selection key and stored in IEMTB::fFlags alongside the
 * IEMTB_F_XXX flags.  The latter flags use bits 31 thru 24, which are all zero
 * in IEMCPU::fExec.
 *
 * @{ */
/** Mode: The block target mode mask. */
#define IEM_F_MODE_MASK                     UINT32_C(0x0000000f)
/** Mode: The IEMMODE part of the IEMTB_F_MODE_MASK value. */
#define IEM_F_MODE_CPUMODE_MASK             UINT32_C(0x00000003)
/** X86 Mode: Bit used to indicate a pre-386 CPU in 16-bit mode (for
 * eliminating a conditional in EIP/IP updating), and flat wide open CS, SS,
 * DS, and ES in 32-bit mode (for simplifying most memory accesses). */
#define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004)
/** X86 Mode: Bit indicating protected mode. */
#define IEM_F_MODE_X86_PROT_MASK            UINT32_C(0x00000008)
/** @todo mix in paging as well? it's not all that important compared to
 *        protected mode. */

/** X86 Mode: 16-bit on 386 or later. */
#define IEM_F_MODE_X86_16BIT                UINT32_C(0x00000000)
/** X86 Mode: 80286, 80186 and 8086/88 targeting blocks (EIP update opt). */
#define IEM_F_MODE_X86_16BIT_PRE_386        UINT32_C(0x00000004)
/** X86 Mode: 16-bit protected mode on 386 or later. */
#define IEM_F_MODE_X86_16BIT_PROT           UINT32_C(0x00000008)
/** X86 Mode: 16-bit protected mode on a pre-386 CPU (80286). */
#define IEM_F_MODE_X86_16BIT_PROT_PRE_386   UINT32_C(0x0000000c)

/** X86 Mode: 32-bit on 386 or later. */
#define IEM_F_MODE_X86_32BIT                UINT32_C(0x00000001)
/** X86 Mode: 32-bit mode with wide open flat CS, SS, DS and ES. */
#define IEM_F_MODE_X86_32BIT_FLAT           UINT32_C(0x00000005)
/** X86 Mode: 32-bit protected mode. */
#define IEM_F_MODE_X86_32BIT_PROT           UINT32_C(0x00000009)
/** X86 Mode: 32-bit protected mode with wide open flat CS, SS, DS and ES. */
#define IEM_F_MODE_X86_32BIT_PROT_FLAT      UINT32_C(0x0000000d)

/** X86 Mode: 64-bit (includes protected, but not the flat bit). */
#define IEM_F_MODE_X86_64BIT                UINT32_C(0x0000000a)


/** Bypass access handlers when set. */
#define IEM_F_BYPASS_HANDLERS               UINT32_C(0x00000010)
/** Have pending hardware instruction breakpoints. */
#define IEM_F_PENDING_BRK_INSTR             UINT32_C(0x00000020)
/** Have pending hardware data breakpoints. */
#define IEM_F_PENDING_BRK_DATA              UINT32_C(0x00000040)

/** X86: Have pending hardware I/O breakpoints. */
#define IEM_F_PENDING_BRK_X86_IO            UINT32_C(0x00000400)
/** X86: Disregard the lock prefix (implied or not) when set. */
#define IEM_F_X86_DISREGARD_LOCK            UINT32_C(0x00000800)

/** The CPL (bits 8 and 9). */
#define IEM_F_X86_CPL_MASK                  UINT32_C(0x00000300)

/** X86 execution context.
 * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with
 * the exception of IEM_F_X86_CTX_NORMAL).  This allows running VMs from SMM
 * mode. */
#define IEM_F_X86_CTX_MASK                  UINT32_C(0x0000f000)
/** X86 context: Plain regular execution context. */
#define IEM_F_X86_CTX_NORMAL                UINT32_C(0x00000000)
/** X86 context: VT-x enabled. */
#define IEM_F_X86_CTX_VTX                   UINT32_C(0x00001000)
/** X86 context: AMD-V enabled. */
#define IEM_F_X86_CTX_SVM                   UINT32_C(0x00002000)
/** X86 context: In AMD-V or VT-x guest mode. */
#define IEM_F_X86_CTX_IN_GUEST              UINT32_C(0x00004000)
/** X86 context: System management mode (SMM). */
#define IEM_F_X86_CTX_SMM                   UINT32_C(0x00008000)

/** @} */
205
206
/** @name IEMTB_F_XXX - Translation block flags (IEMTB::fFlags).
 *
 * Extends the IEM_F_XXX flags (subject to IEMTB_F_IEM_F_MASK) to make up the
 * translation block flags.  The combined flag mask (subject to
 * IEMTB_F_KEY_MASK) is used as part of the lookup key for translation blocks.
 *
 * @{ */
/** Mask of IEM_F_XXX flags included in IEMTB_F_XXX. */
#define IEMTB_F_IEM_F_MASK      UINT32_C(0x00ffffff)

/** Type: The block type mask. */
#define IEMTB_F_TYPE_MASK       UINT32_C(0x03000000)
/** Type: Purely threaded recompiler (via tables). */
#define IEMTB_F_TYPE_THREADED   UINT32_C(0x01000000)
/** Type: Native recompilation. */
#define IEMTB_F_TYPE_NATIVE     UINT32_C(0x02000000)

/** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
 * @note The X86 context bits are masked out of the key except for SMM, which
 *       is OR'ed back in - i.e. VT-x/AMD-V state does not select a different
 *       TB, but SMM does.  (NOTE(review): the original draft comment here was
 *       left unfinished; confirm this reading of the intent.) */
#define IEMTB_F_KEY_MASK        ((UINT32_C(0xffffffff) & ~IEM_F_X86_CTX_MASK) | IEM_F_X86_CTX_SMM)
/** @} */
228
/* Compile-time checks that each IEM_F_MODE_X86_XXX constant decomposes
   correctly into its IEMMODE, FLAT_OR_PRE_386 and PROT components. */
AssertCompile( (IEM_F_MODE_X86_16BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_PROT_MASK));
AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile( IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_PROT_MASK));
AssertCompile( (IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile( IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_PROT_MASK);

AssertCompile( (IEM_F_MODE_X86_32BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_PROT_MASK));
AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile( IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile(!(IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_PROT_MASK));
AssertCompile( (IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile(!(IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile( IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_PROT_MASK);

AssertCompile( (IEM_F_MODE_X86_64BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT);
AssertCompile( IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_PROT_MASK);
AssertCompile(!(IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
258
259
/**
 * Translation block.
 */
typedef struct IEMTB
{
    /** Next block with the same hash table entry. */
    PIEMTB volatile pNext;
    /** List on the local VCPU for blocks. */
    RTLISTNODE LocalList;

    /** @name What uniquely identifies the block.
     * @{ */
    /** The physical address of the first opcode byte. */
    RTGCPHYS GCPhysPc;
    /** The flat PC (RIP + CS base, see iemGetPcWithPhysAndCode). */
    uint64_t uPc;
    /** Combined IEM_F_XXX and IEMTB_F_XXX flags. */
    uint32_t fFlags;
    union
    {
        struct
        {
            /** The CS base. */
            uint32_t uCsBase;
            /** The CS limit (UINT32_MAX for 64-bit code). */
            uint32_t uCsLimit;
            /** The CS selector value. */
            uint16_t CS;
            /** Relevant X86DESCATTR_XXX bits. */
            uint16_t fAttr;
        } x86;
    };
    /** @} */

    /** Number of bytes of opcodes covered by this block.
     * @todo Support discontiguous chunks of opcodes in same block, though maybe
     *       restrict to the initial page or smth. */
    uint32_t cbPC;

    union
    {
        /** Threaded recompiler data (see the IEM_MC2_EMIT_CALL_X macros). */
        struct
        {
            /** Number of calls in paCalls. */
            uint32_t cCalls;
            /** Number of calls allocated. */
            uint32_t cAllocated;
            /** The call sequence table. */
            PIEMTHRDEDCALLENTRY paCalls;
        } Thrd;
    };


} IEMTB;
311
312
313/*********************************************************************************************************************************
314* Defined Constants And Macros *
315*********************************************************************************************************************************/
/* Route the one-byte opcode map referenced by the included instruction
   decoders to the threaded recompiler's own copy of the table. */
#define g_apfnOneByteMap g_apfnIemThreadedRecompilerOneByteMap


/* Override the effective-address calculation so the raw addressing info is
   also captured in a local uEffAddrInfo variable for the recompiler. */
#undef IEM_MC_CALC_RM_EFF_ADDR
#ifndef IEM_WITH_SETJMP
/* NOTE(review): this branch is unreachable (the #error above requires
   IEM_WITH_SETJMP) and it calls the Jmp variant of the helper with a
   return-code wrapper - presumably a copy/paste leftover; confirm before
   ever enabling a non-setjmp build. */
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
    uint64_t uEffAddrInfo; \
    IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (bRm), (cbImm), &(a_GCPtrEff), &uEffAddrInfo))
#else
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
    uint64_t uEffAddrInfo; \
    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (bRm), (cbImm), &uEffAddrInfo))
#endif
329
/** Appends a one-argument call entry to the current TB's call sequence.
 * The const locals exist only to type-check the arguments at compile time.
 * @note No bounds check of Thrd.cCalls against Thrd.cAllocated here -
 *       NOTE(review): presumably the caller guarantees room; confirm. */
#define IEM_MC2_EMIT_CALL_1(a_enmFunction, a_uArg0) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->cbOpcode = IEM_GET_INSTR_LEN(pVCpu); \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
/** Appends a two-argument call entry to the current TB's call sequence.
 * @see IEM_MC2_EMIT_CALL_1 */
#define IEM_MC2_EMIT_CALL_2(a_enmFunction, a_uArg0, a_uArg1) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->cbOpcode = IEM_GET_INSTR_LEN(pVCpu); \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = 0; \
    } while (0)
/** Appends a three-argument call entry to the current TB's call sequence.
 * @see IEM_MC2_EMIT_CALL_1 */
#define IEM_MC2_EMIT_CALL_3(a_enmFunction, a_uArg0, a_uArg1, a_uArg2) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        uint64_t const uArg2Check = (a_uArg2); RT_NOREF(uArg2Check); \
        \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->cbOpcode = IEM_GET_INSTR_LEN(pVCpu); \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = a_uArg2; \
    } while (0)
369
370
/*
 * IEM_MC_DEFER_TO_CIMPL_0 is easily wrapped up.
 *
 * Doing so will also take care of IEMOP_RAISE_DIVIDE_ERROR, IEMOP_RAISE_INVALID_LOCK_PREFIX,
 * IEMOP_RAISE_INVALID_OPCODE and their users.
 */
#undef IEM_MC_DEFER_TO_CIMPL_0
#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) iemThreadedRecompilerMcDeferToCImpl0(pVCpu, a_pfnCImpl)

/** Function type for parameterless C-implementation workers. */
typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
/** Pointer to a parameterless C-implementation worker. */
typedef FNIEMCIMPL0 *PFNIEMCIMPL0;

/**
 * Worker for the IEM_MC_DEFER_TO_CIMPL_0 override: invokes the C
 * implementation with the current instruction length.
 */
DECLINLINE(VBOXSTRICTRC) iemThreadedRecompilerMcDeferToCImpl0(PVMCPUCC pVCpu, PFNIEMCIMPL0 pfnCImpl)
{
    return pfnCImpl(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
}

/** @todo deal with IEM_MC_DEFER_TO_CIMPL_1, IEM_MC_DEFER_TO_CIMPL_2 and
 *        IEM_MC_DEFER_TO_CIMPL_3 as well. */
390
391/*
392 * Include the "annotated" IEMAllInstructions*.cpp.h files.
393 */
394#include "IEMThreadedInstructions.cpp.h"
395
396
397
398/*
399 * Real code.
400 */
401
402static VBOXSTRICTRC iemThreadedCompile(PVMCCV pVM, PVMCPUCC pVCpu)
403{
404 RT_NOREF(pVM, pVCpu, pTb);
405 return VERR_NOT_IMPLEMENTED;
406}
407
408
409static VBOXSTRICTRC iemThreadedCompileLongJumped(PVMCCV pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
410{
411 RT_NOREF(pVM, pVCpu);
412 return rcStrict;
413}
414
415
416static PIEMTB iemThreadedTbLookup(PVMCCV pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPC, uint64_t uPc)
417{
418 RT_NOREF(pVM, pVCpu, GCPhysPC, uPc);
419 return NULL;
420}
421
422
423static VBOXSTRICTRC iemThreadedTbExec(PVMCCV pVM, PVMCPUCC pVCpu, PIEMTB pTb)
424{
425 RT_NOREF(pVM, pVCpu, pTb);
426 return VERR_NOT_IMPLEMENTED;
427}
428
429
430/**
431 * This is called when the PC doesn't match the current pbInstrBuf.
432 */
433static uint64_t iemGetPcWithPhysAndCodeMissed(PVMCPUCC pVCpu, uint64_t const uPc, PRTGCPHYS pPhys)
434{
435 /** @todo see iemOpcodeFetchBytesJmp */
436 pVCpu->iem.s.pbInstrBuf = NULL;
437
438 pVCpu->iem.s.offInstrNextByte = 0;
439 pVCpu->iem.s.offCurInstrStart = 0;
440 pVCpu->iem.s.cbInstrBuf = 0;
441 pVCpu->iem.s.cbInstrBufTotal = 0;
442
443}
444
445
/** @todo need private inline decl for throw/nothrow matching IEM_WITH_SETJMP? */
/**
 * Gets the flat PC and its physical address, reusing the currently mapped
 * instruction buffer when the PC falls inside it.
 *
 * @returns The flat PC (RIP + CS base).
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pPhys   Where to return the physical address of the PC.
 */
DECL_INLINE_THROW(uint64_t) iemGetPcWithPhysAndCode(PVMCPUCC pVCpu, PRTGCPHYS pPhys)
{
    /* In 64-bit mode the CS base must be zero, so RIP + base is still flat. */
    Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
    uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
    if (pVCpu->iem.s.pbInstrBuf)
    {
        /* Unsigned wrap-around makes a PC below uInstrBufPc fail the check too. */
        uint64_t off = uPc - pVCpu->iem.s.uInstrBufPc;
        if (off < pVCpu->iem.s.cbInstrBufTotal)
        {
            /* Hit: repoint the decoder state at the new offset in the buffer. */
            pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
            pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
            /* Cap the readable window 15 bytes past the instruction start (the
               x86 maximum instruction length), or at the buffer end. */
            if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
                pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
            else
                pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;

            *pPhys = pVCpu->iem.s.GCPhysInstrBuf + off;
            return uPc;
        }
    }
    /* Miss: take the slow path that invalidates/re-establishes the mapping. */
    return iemGetPcWithPhysAndCodeMissed(pVCpu, uPc, pPhys);
}
469
470
471VMMDECL(VBOXSTRICTRC) IEMExecRecompilerThreaded(PVMCC pVM, PVMCPUCC pVCpu)
472{
473 /*
474 * Init the execution environment.
475 */
476 iemInitExec(pVCpu, false);
477 ...
478
479 /*
480 * Run-loop.
481 *
482 * If we're using setjmp/longjmp we combine all the catching here to avoid
483 * having to call setjmp for each block we're executing.
484 */
485 for (;;)
486 {
487 PIEMTB pTb = NULL;
488 VBOXSTRICTRC rcStrict;
489#ifdef IEM_WITH_SETJMP
490 IEM_TRY_SETJMP(pVCpu, rcStrict)
491#endif
492 {
493 for (;;)
494 {
495 /* Translate PC to physical address, we'll need this for both lookup and compilation. */
496 RTGCPHYS GCPhysPC;
497 uint64_t const uPC = iemGetPcWithPhysAndCode(pVCpu, &GCPhysPC);
498
499 pTb = iemThreadedTbLookup(pVM, pVCpu, GCPhysPC, uPc);
500 if (pTb)
501 rcStrict = iemThreadedTbExec(pVM, pVCpu, pTb);
502 else
503 rcStrict = iemThreadedCompile(pVM, pVCpu, GCPhysPC, uPc);
504 if (rcStrict == VINF_SUCCESS)
505 { /* likely */ }
506 else
507 return rcStrict;
508 }
509 }
510#ifdef IEM_WITH_SETJMP
511 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
512 {
513 pVCpu->iem.s.cLongJumps++;
514 if (pTb)
515 return rcStrict;
516 return iemThreadedCompileLongJumped(pVM, pVCpu, rcStrict);
517 }
518 IEM_CATCH_LONGJMP_END(pVCpu);
519#endif
520 }
521}
522
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette