VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@109021

Last change on this file since 109021 was 108953, checked in by vboxsync, 4 weeks ago:

VMM/IEM: Made the new A64 tables link and finalized names. jiraref:VBP-1598
/* $Id: IEMAll.cpp 108953 2025-04-11 23:49:15Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - All Contexts.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/** @page pg_iem IEM - Interpreted Execution Manager
 *
 * The interpreted execution manager (IEM) is for executing short guest code
 * sequences that are causing too many exits / virtualization traps. It will
 * also be used to interpret single instructions, thus replacing the selective
 * interpreters in EM and IOM.
 *
 * Design goals:
 *      - Relatively small footprint, although we favour speed and correctness
 *        over size.
 *      - Reasonably fast.
 *      - Correctly handle lock prefixed instructions.
 *      - Complete instruction set - eventually.
 *      - Refactorable into a recompiler, maybe.
 *      - Replace EMInterpret*.
 *
 * Using the existing disassembler has been considered, however this is thought
 * to conflict with speed as the disassembler chews things a bit too much while
 * leaving us with a somewhat complicated state to interpret afterwards.
 *
 *
 * The current code is very much work in progress. You've been warned!
 *
 *
 * @section sec_iem_fpu_instr FPU Instructions
 *
 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
 * same or equivalent instructions on the host FPU. To make life easy, we also
 * let the FPU prioritize the unmasked exceptions for us. This, however, only
 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
 * can trigger spurious FPU exceptions.
 *
 * The guest FPU state is not loaded into the host CPU and kept there till we
 * leave IEM because the calling conventions have declared an all year open
 * season on much of the FPU state. For instance an innocent looking call to
 * memcpy might end up using a whole bunch of XMM or MM registers if the
 * particular implementation finds it worthwhile.
 *
 *
 * @section sec_iem_logging Logging
 *
 * The IEM code uses the \"IEM\" log group for the main logging. The different
 * logging levels/flags are generally used for the following purposes:
 *      - Level 1  (Log)  : Errors, exceptions, interrupts and such major events.
 *      - Flow  (LogFlow) : Basic enter/exit IEM state info.
 *      - Level 2  (Log2) : ?
 *      - Level 3  (Log3) : More detailed enter/exit IEM state info.
 *      - Level 4  (Log4) : Decoding mnemonics w/ EIP.
 *      - Level 5  (Log5) : Decoding details.
 *      - Level 6  (Log6) : Enables/disables the lockstep comparison with REM.
 *      - Level 7  (Log7) : iret++ execution logging.
 *      - Level 8  (Log8) :
 *      - Level 9  (Log9) :
 *      - Level 10 (Log10): TLBs.
 *      - Level 11 (Log11): Unmasked FPU exceptions.
 *
 * The \"IEM_MEM\" log group covers most of memory related details logging,
 * except for errors and exceptions:
 *      - Level 1  (Log)  : Reads.
 *      - Level 2  (Log2) : Read fallbacks.
 *      - Level 3  (Log3) : MemMap read.
 *      - Level 4  (Log4) : MemMap read fallbacks.
 *      - Level 5  (Log5) : Writes.
 *      - Level 6  (Log6) : Write fallbacks.
 *      - Level 7  (Log7) : MemMap writes and read-writes.
 *      - Level 8  (Log8) : MemMap write and read-write fallbacks.
 *      - Level 9  (Log9) : Stack reads.
 *      - Level 10 (Log10): Stack read fallbacks.
 *      - Level 11 (Log11): Stack writes.
 *      - Level 12 (Log12): Stack write fallbacks.
 *      - Flow  (LogFlow) :
 *
 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
 *      - Level 1  (Log)  : Errors and other major events.
 *      - Flow  (LogFlow) : Misc flow stuff (cleanup?)
 *      - Level 2  (Log2) : VM exits.
 *
 * The syscall logging level assignments:
 *      - Level 1: DOS and BIOS.
 *      - Level 2: Windows 3.x
 *      - Level 3: Linux.
 */
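
/* Illustrative note (not part of the original file): a statement at a given
 * level in the \"IEM\" group is simply the corresponding RTLog macro, e.g.:
 *
 *     Log2(("IEM: some level-2 event, rc=%Rrc\n", rc));
 *
 * Which levels actually produce output is selected at runtime through the
 * usual log-group settings (e.g. the VBOX_LOG environment variable on debug
 * builds); the exact settings string is not specified here.
 */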


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pdmapic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gcm.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <iprt/asm-math.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
# include <iprt/asm-arm.h>
#endif
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#include "IEMInlineExec.h"
#ifdef VBOX_VMM_TARGET_X86
# include "target-x86/IEMInline-x86.h"
# include "target-x86/IEMInlineDecode-x86.h"
# include "target-x86/IEMInlineExec-x86.h"
#elif defined(VBOX_VMM_TARGET_ARMV8)
# include "target-armv8/IEMInline-armv8.h"
# include "target-armv8/IEMAllIntprA64Tables-armv8.h"
# include "target-armv8/IEMInlineExec-armv8.h"
#endif



/**
 * Initializes the decoder state.
 *
 * iemReInitDecoder is mostly a copy of this function.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   fExecOpts   Optional execution flags:
 *                          - IEM_F_BYPASS_HANDLERS
 *                          - IEM_F_X86_DISREGARD_LOCK
 */
DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
#ifdef VBOX_STRICT
    iemInitDecoderStrictTarget(pVCpu);
#endif

    /* Execution state: */
    uint32_t fExec;
    pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;

    /* Decoder state: */
#ifdef VBOX_VMM_TARGET_X86
    pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_X86_CPUMODE_MASK; /** @todo check if this is correct... */
    pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_X86_CPUMODE_MASK;
    if ((fExec & IEM_F_MODE_X86_CPUMODE_MASK) != IEMMODE_64BIT)
    {
        pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_X86_CPUMODE_MASK; /** @todo check if this is correct... */
        pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_X86_CPUMODE_MASK;
    }
    else
    {
        pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
        pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
    }
    pVCpu->iem.s.fPrefixes  = 0;
    pVCpu->iem.s.uRexReg    = 0;
    pVCpu->iem.s.uRexB      = 0;
    pVCpu->iem.s.uRexIndex  = 0;
    pVCpu->iem.s.idxPrefix  = 0;
    pVCpu->iem.s.uVex3rdReg = 0;
    pVCpu->iem.s.uVexLength = 0;
    pVCpu->iem.s.fEvexStuff = 0;
    pVCpu->iem.s.iEffSeg    = X86_SREG_DS;
    pVCpu->iem.s.offModRm   = 0;
#endif /* VBOX_VMM_TARGET_X86 */
#ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.pbInstrBuf       = NULL;
    pVCpu->iem.s.offInstrNextByte = 0;
# ifdef VBOX_VMM_TARGET_X86
    pVCpu->iem.s.offCurInstrStart = 0;
# endif
# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    pVCpu->iem.s.offOpcode        = 0;
# endif
# ifdef VBOX_STRICT
    pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
#  ifdef VBOX_VMM_TARGET_X86
    pVCpu->iem.s.cbInstrBuf       = UINT16_MAX;
#  endif
    pVCpu->iem.s.cbInstrBufTotal  = UINT16_MAX;
    pVCpu->iem.s.uInstrBufPc      = UINT64_C(0xc0ffc0ffcff0c0ff);
# endif
#else  /* !IEM_WITH_CODE_TLB */
    pVCpu->iem.s.offOpcode = 0;
    pVCpu->iem.s.cbOpcode  = 0;
#endif /* !IEM_WITH_CODE_TLB */
    pVCpu->iem.s.cActiveMappings = 0;
    pVCpu->iem.s.iNextMapping    = 0;
    pVCpu->iem.s.rcPassUp        = VINF_SUCCESS;

#ifdef DBGFTRACE_ENABLED
    iemInitDecoderTraceTargetPc(pVCpu, fExec);
#endif
}


/**
 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
 *
 * This is mostly a copy of iemInitDecoder.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
{
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
#ifdef VBOX_STRICT
    iemInitDecoderStrictTarget(pVCpu);
#endif

    /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
    AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
              ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));

#ifdef VBOX_VMM_TARGET_X86
    IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
    pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
    pVCpu->iem.s.enmEffAddrMode = enmMode;
    if (enmMode != IEMMODE_64BIT)
    {
        pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
        pVCpu->iem.s.enmEffOpSize = enmMode;
    }
    else
    {
        pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
        pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
    }
    pVCpu->iem.s.fPrefixes  = 0;
    pVCpu->iem.s.uRexReg    = 0;
    pVCpu->iem.s.uRexB      = 0;
    pVCpu->iem.s.uRexIndex  = 0;
    pVCpu->iem.s.idxPrefix  = 0;
    pVCpu->iem.s.uVex3rdReg = 0;
    pVCpu->iem.s.uVexLength = 0;
    pVCpu->iem.s.fEvexStuff = 0;
    pVCpu->iem.s.iEffSeg    = X86_SREG_DS;
    pVCpu->iem.s.offModRm   = 0;
#endif
#ifdef IEM_WITH_CODE_TLB
    if (pVCpu->iem.s.pbInstrBuf)
    {
# ifdef VBOX_VMM_TARGET_X86
        uint64_t off = (enmMode == IEMMODE_64BIT
                        ? pVCpu->cpum.GstCtx.rip
                        : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
                     - pVCpu->iem.s.uInstrBufPc;
        if (off < pVCpu->iem.s.cbInstrBufTotal)
# elif defined(VBOX_VMM_TARGET_ARMV8)
        uint64_t const off = pVCpu->cpum.GstCtx.Pc.u64 - pVCpu->iem.s.uInstrBufPc;
        if (off + sizeof(uint32_t) <= pVCpu->iem.s.cbInstrBufTotal)
# endif
        {
            pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
# ifdef VBOX_VMM_TARGET_X86
            pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
            if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
                pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
            else
                pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
# endif
        }
        else
        {
            pVCpu->iem.s.pbInstrBuf       = NULL;
            pVCpu->iem.s.offInstrNextByte = 0;
# ifdef VBOX_VMM_TARGET_X86
            pVCpu->iem.s.offCurInstrStart = 0;
            pVCpu->iem.s.cbInstrBuf       = 0;
# endif
            pVCpu->iem.s.cbInstrBufTotal  = 0;
            pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
        }
    }
    else
    {
        pVCpu->iem.s.offInstrNextByte = 0;
# ifdef VBOX_VMM_TARGET_X86
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.cbInstrBuf       = 0;
# endif
        pVCpu->iem.s.cbInstrBufTotal  = 0;
# ifdef VBOX_STRICT
        pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
# endif
    }
# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    pVCpu->iem.s.offOpcode = 0;
# endif
#else  /* !IEM_WITH_CODE_TLB */
    pVCpu->iem.s.cbOpcode  = 0;
    pVCpu->iem.s.offOpcode = 0;
#endif /* !IEM_WITH_CODE_TLB */
    Assert(pVCpu->iem.s.cActiveMappings == 0);
    pVCpu->iem.s.iNextMapping = 0;
    Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
    Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));

#ifdef DBGFTRACE_ENABLED
    iemInitDecoderTraceTargetPc(pVCpu, pVCpu->iem.s.fExec);
#endif
}


/**
 * Prefetch opcodes the first time when starting execution.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   fExecOpts   Optional execution flags:
 *                          - IEM_F_BYPASS_HANDLERS
 *                          - IEM_F_X86_DISREGARD_LOCK
 */
DECLINLINE(VBOXSTRICTRC) iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
{
    iemInitDecoder(pVCpu, fExecOpts);

#ifndef IEM_WITH_CODE_TLB
    return iemOpcodeFetchPrefetch(pVCpu);
#else
    return VINF_SUCCESS;
#endif
}


#ifdef LOG_ENABLED
/**
 * Logs the current instruction.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pszFunction The IEM function doing the execution.
 */
static void iemLogCurInstr(PVMCPUCC pVCpu, const char *pszFunction) RT_NOEXCEPT
{
# ifdef IN_RING3
    if (LogIs2Enabled())
    {
        char     szInstr[256];
        uint32_t cbInstr = 0;
        DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), &cbInstr);

#  ifdef VBOX_VMM_TARGET_X86
        PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
        Log2(("**** %s fExec=%x\n"
              " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
              " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
              " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
              " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
              " %s\n"
              , pszFunction, pVCpu->iem.s.fExec,
              pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx,
              pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
              pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp,
              pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
              pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
              pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
              szInstr));
#  elif defined(VBOX_VMM_TARGET_ARMV8)
        Log2(("**** %s fExec=%x\n"
              " x0=%016RX64  x1=%016RX64  x2=%016RX64  x3=%016RX64\n"
              " x4=%016RX64  x5=%016RX64  x6=%016RX64  x7=%016RX64\n"
              " x8=%016RX64  x9=%016RX64 x10=%016RX64 x11=%016RX64\n"
              "x12=%016RX64 x13=%016RX64 x14=%016RX64 x15=%016RX64\n"
              "x16=%016RX64 x17=%016RX64 x18=%016RX64 x19=%016RX64\n"
              "x20=%016RX64 x21=%016RX64 x22=%016RX64 x23=%016RX64\n"
              "x24=%016RX64 x25=%016RX64 x26=%016RX64 x27=%016RX64\n"
              "x28=%016RX64  bp=%016RX64  lr=%016RX64  sp=%016RX64\n"
              " pc=%016RX64 psr=%08RX64 EL%u\n"
              " %s\n"
              , pszFunction, pVCpu->iem.s.fExec,
              pVCpu->cpum.GstCtx.aGRegs[0],  pVCpu->cpum.GstCtx.aGRegs[1],  pVCpu->cpum.GstCtx.aGRegs[2],  pVCpu->cpum.GstCtx.aGRegs[3],
              pVCpu->cpum.GstCtx.aGRegs[4],  pVCpu->cpum.GstCtx.aGRegs[5],  pVCpu->cpum.GstCtx.aGRegs[6],  pVCpu->cpum.GstCtx.aGRegs[7],
              pVCpu->cpum.GstCtx.aGRegs[8],  pVCpu->cpum.GstCtx.aGRegs[9],  pVCpu->cpum.GstCtx.aGRegs[10], pVCpu->cpum.GstCtx.aGRegs[11],
              pVCpu->cpum.GstCtx.aGRegs[12], pVCpu->cpum.GstCtx.aGRegs[13], pVCpu->cpum.GstCtx.aGRegs[14], pVCpu->cpum.GstCtx.aGRegs[15],
              pVCpu->cpum.GstCtx.aGRegs[16], pVCpu->cpum.GstCtx.aGRegs[17], pVCpu->cpum.GstCtx.aGRegs[18], pVCpu->cpum.GstCtx.aGRegs[19],
              pVCpu->cpum.GstCtx.aGRegs[20], pVCpu->cpum.GstCtx.aGRegs[21], pVCpu->cpum.GstCtx.aGRegs[22], pVCpu->cpum.GstCtx.aGRegs[23],
              pVCpu->cpum.GstCtx.aGRegs[24], pVCpu->cpum.GstCtx.aGRegs[25], pVCpu->cpum.GstCtx.aGRegs[26], pVCpu->cpum.GstCtx.aGRegs[27],
              pVCpu->cpum.GstCtx.aGRegs[28], pVCpu->cpum.GstCtx.aGRegs[29], pVCpu->cpum.GstCtx.aGRegs[30],
              pVCpu->cpum.GstCtx.aSpReg[IEM_F_MODE_ARM_GET_EL(pVCpu->iem.s.fExec) > 0],
              pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState, IEM_F_MODE_ARM_GET_EL(pVCpu->iem.s.fExec),
              szInstr));
#  else
#   error "port me"
#  endif

        /* This stuff sucks atm. as it fills the log with MSRs. */
        //if (LogIs3Enabled())
        //    DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
        return;
    }
# endif

# ifdef VBOX_VMM_TARGET_X86
    LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
             pszFunction, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
             pVCpu->cpum.GstCtx.eflags.u));
#  define LOGFLOW_REG_STATE_EX(a_pszName, a_szExtraFmt, ...) \
    LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x" a_szExtraFmt "\n", \
             (a_pszName), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, \
             pVCpu->cpum.GstCtx.eflags.u, __VA_ARGS__))

# elif defined(VBOX_VMM_TARGET_ARMV8)
    LogFlow(("%s: pc=%08RX64 lr=%08RX64 sp=%08RX64 psr=%08RX64 EL%u\n",
             pszFunction, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.aGRegs[ARMV8_A64_REG_LR],
             pVCpu->cpum.GstCtx.aSpReg[IEM_F_MODE_ARM_GET_EL(pVCpu->iem.s.fExec) > 0], pVCpu->cpum.GstCtx.fPState,
             IEM_F_MODE_ARM_GET_EL(pVCpu->iem.s.fExec) ));
#  define LOGFLOW_REG_STATE_EX(a_pszName, a_szExtraFmt, ...) \
    LogFlow(("%s: pc=%08RX64 lr=%08RX64 sp=%08RX64 psr=%08RX64 EL%u" a_szExtraFmt "\n", \
             (a_pszName), pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.aGRegs[ARMV8_A64_REG_LR], \
             pVCpu->cpum.GstCtx.aSpReg[IEM_F_MODE_ARM_GET_EL(pVCpu->iem.s.fExec) > 0], pVCpu->cpum.GstCtx.fPState, \
             IEM_F_MODE_ARM_GET_EL(pVCpu->iem.s.fExec), __VA_ARGS__))
    LOGFLOW_REG_STATE_EX(pszFunction, "", 1);

# else
#  error "port me"
# endif
    RT_NOREF_PV(pVCpu);
}
#endif /* LOG_ENABLED */


#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
 *
 * @returns Modified rcStrict.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The instruction execution status.
 */
static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
    if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
    {
        /* VMX preemption timer takes priority over NMI-window exits. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
        {
            rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
        }
        /*
         * Check remaining intercepts.
         *
         * NMI-window and Interrupt-window VM-exits.
         * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
         * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
         *
         * See Intel spec. 26.7.6 "NMI-Window Exiting".
         * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
         */
        else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
                 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
                 && !TRPMHasTrap(pVCpu))
        {
            Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
            if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
                && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
            {
                rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
                Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
            }
            else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
                     && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
            {
                rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
                Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
            }
        }
    }
    /* TPR-below threshold/APIC write has the highest priority. */
    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    {
        rcStrict = iemVmxApicWriteEmulation(pVCpu);
        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    }
    /* MTF takes priority over VMX-preemption timer. */
    else
    {
        rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    }
    return rcStrict;
}
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */


/**
 * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,
 * IEMExecOneBypass and friends.
 *
 * Similar code is found in IEMExecLots.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling EMT.
 * @param   pszFunction         The calling function name.
 * @tparam  a_fExecuteInhibit   X86: If set, execute the instruction following
 *                              CLI, POP SS and MOV SS,GR.
 */
template<bool const a_fExecuteInhibit>
DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, const char *pszFunction)
{
    AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    RT_NOREF_PV(pszFunction);

    VBOXSTRICTRC rcStrict;
    IEM_TRY_SETJMP(pVCpu, rcStrict)
    {
        rcStrict = iemExecDecodeAndInterpretTargetInstruction(pVCpu);
    }
    IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    {
        pVCpu->iem.s.cLongJumps++;
    }
    IEM_CATCH_LONGJMP_END(pVCpu);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->iem.s.cInstructions++;
    if (pVCpu->iem.s.cActiveMappings > 0)
    {
        Assert(rcStrict != VINF_SUCCESS);
        iemMemRollback(pVCpu);
    }
    AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));

//#ifdef DEBUG
//    AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
//#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /*
     * Perform any VMX nested-guest instruction boundary actions.
     *
     * If any of these causes a VM-exit, we must skip executing the next
     * instruction (would run into stale page tables). A VM-exit makes sure
     * there is no interrupt-inhibition, so that should ensure we don't go
     * to try execute the next instruction. Clearing a_fExecuteInhibit is
     * problematic because of the setjmp/longjmp clobbering above.
     */
    if (   !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                     | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
        || rcStrict != VINF_SUCCESS)
    { /* likely */ }
    else
        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
#endif

#ifdef VBOX_VMM_TARGET_X86
    /* Execute the next instruction as well if a cli, pop ss or
       mov ss, Gr has just completed successfully. */
    if RT_CONSTEXPR_IF(a_fExecuteInhibit)
    {
        if (   rcStrict == VINF_SUCCESS
            && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
        {
            rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu,
                                                        pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
            if (rcStrict == VINF_SUCCESS)
            {
# ifdef LOG_ENABLED
                iemLogCurInstr(pVCpu, pszFunction);
# endif
                IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
                {
                    rcStrict = iemExecDecodeAndInterpretTargetInstruction(pVCpu);
                }
                IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
                {
                    pVCpu->iem.s.cLongJumps++;
                }
                IEM_CATCH_LONGJMP_END(pVCpu);
                if (rcStrict == VINF_SUCCESS)
                {
                    pVCpu->iem.s.cInstructions++;
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                    if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                                  | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
                    { /* likely */ }
                    else
                        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
# endif
                }
                if (pVCpu->iem.s.cActiveMappings > 0)
                {
                    Assert(rcStrict != VINF_SUCCESS);
                    iemMemRollback(pVCpu);
                }
                AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
                AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
                AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
            }
            else if (pVCpu->iem.s.cActiveMappings > 0)
                iemMemRollback(pVCpu);
            /** @todo drop this after we bake this change into RIP advancing. */
            CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
        }
    }
#endif /* VBOX_VMM_TARGET_X86 */

    /*
     * Return value fiddling, statistics and sanity assertions.
     */
    rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);

#ifdef VBOX_STRICT
    iemInitExecTailStrictTarget(pVCpu);
#endif
    return rcStrict;
}


/**
 * Execute one instruction.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
{
    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
#ifdef LOG_ENABLED
    iemLogCurInstr(pVCpu, "IEMExecOne");
#endif

    /*
     * Do the decoding and emulation.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner<true>(pVCpu, "IEMExecOne");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

#ifdef LOG_ENABLED
    if (rcStrict != VINF_SUCCESS)
        LOGFLOW_REG_STATE_EX("IEMExecOne", " - rcStrict=%Rrc", VBOXSTRICTRC_VAL(rcStrict));
#endif
    return rcStrict;
}
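
/* Illustrative caller sketch (an assumption, not from this file): a caller
 * such as EM would typically single-step a guest instruction along these
 * lines, where handleIemStatus() is a hypothetical helper:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict != VINF_SUCCESS)
 *         rcStrict = handleIemStatus(pVCpu, rcStrict);
 */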


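/**
 * Executes one instruction, preferring the caller-supplied pre-fetched opcode
 * bytes when they cover the current PC.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   OpcodeBytesPC   The guest PC the opcode bytes were fetched from;
 *                          the buffer is only used when this equals the
 *                          current PC.
 * @param   pvOpcodeBytes   The pre-fetched opcode bytes.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes.
 */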
VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
                                                        const void *pvOpcodeBytes, size_t cbOpcodeBytes)
{
    VBOXSTRICTRC rcStrict;
    if (   cbOpcodeBytes
        && iemRegGetPC(pVCpu) == OpcodeBytesPC)
    {
        iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
#ifdef IEM_WITH_CODE_TLB
        pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
        pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
        pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
# ifdef VBOX_VMM_TARGET_X86
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.offInstrNextByte = 0;
# endif
        pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
#else
        pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
        memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
#endif
        rcStrict = VINF_SUCCESS;
    }
    else
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner<true>(pVCpu, "IEMExecOneWithPrefetchedByPC");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}


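/**
 * Executes one instruction with memory access handlers bypassed
 * (IEM_F_BYPASS_HANDLERS) and without chaining into any inhibit-shadowed
 * follow-up instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */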
VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner<false>(pVCpu, "IEMExecOneBypass");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}


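/**
 * Combination of IEMExecOneBypass and IEMExecOneWithPrefetchedByPC: bypasses
 * memory access handlers and prefers the caller-supplied opcode bytes when
 * they cover the current PC.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   OpcodeBytesPC   The guest PC the opcode bytes were fetched from.
 * @param   pvOpcodeBytes   The pre-fetched opcode bytes.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes.
 */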
VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
                                                              const void *pvOpcodeBytes, size_t cbOpcodeBytes)
{
    VBOXSTRICTRC rcStrict;
    if (   cbOpcodeBytes
        && iemRegGetPC(pVCpu) == OpcodeBytesPC)
    {
        iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
#ifdef IEM_WITH_CODE_TLB
        pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
        pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
        pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
# ifdef VBOX_VMM_TARGET_X86
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.offInstrNextByte = 0;
# endif
        pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
#else
        pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
        memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
#endif
        rcStrict = VINF_SUCCESS;
    }
    else
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner<false>(pVCpu, "IEMExecOneBypassWithPrefetchedByPC");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}


/**
 * For handling split cacheline lock operations when the host has split-lock
 * detection enabled.
 *
 * This will cause the interpreter to disregard the lock prefix and implicit
 * locking (xchg).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
{
    /*
     * Do the decoding and emulation.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner<true>(pVCpu, "IEMExecOneIgnoreLock");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

#ifdef LOG_ENABLED
    if (rcStrict != VINF_SUCCESS)
        LOGFLOW_REG_STATE_EX("IEMExecOneIgnoreLock", " - rcStrict=%Rrc", VBOXSTRICTRC_VAL(rcStrict));
#endif
    return rcStrict;
}


/**
 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
 * inject a pending TRPM trap.
 */
VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
{
    Assert(TRPMHasTrap(pVCpu));
#ifdef VBOX_VMM_TARGET_X86

    if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
        && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    {
        /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
        if (fIntrEnabled)
        {
            if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
                fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
            else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
                fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
            else
            {
                Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
                fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
            }
        }
# else
        bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
# endif
        if (fIntrEnabled)
        {
            uint8_t   u8TrapNo;
            TRPMEVENT enmType;
            uint32_t  uErrCode;
            RTGCPTR   uCr2;
            int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
            AssertRC(rc2);
            Assert(enmType == TRPM_HARDWARE_INT);
            VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);

            TRPMResetTrap(pVCpu);

# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
            /* Injecting an event may cause a VM-exit. */
            if (   rcStrict != VINF_SUCCESS
                && rcStrict != VINF_IEM_RAISED_XCPT)
                return iemExecStatusCodeFiddling(pVCpu, rcStrict);
# else
            NOREF(rcStrict);
# endif
        }
    }

    return VINF_SUCCESS;

#else /* !VBOX_VMM_TARGET_X86 */
    RT_NOREF(pVCpu);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
#endif /* !VBOX_VMM_TARGET_X86 */
}


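/**
 * Executes up to @a cMaxInstructions instructions, injecting a pending TRPM
 * trap first when possible.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling EMT.
 * @param   cMaxInstructions    Maximum number of instructions to execute.
 * @param   cPollRate           Timer polling rate mask; must be a power of two
 *                              minus one as it is ANDed with the instruction
 *                              countdown to decide when to poll timers.
 * @param   pcInstructions      Where to return the number of instructions
 *                              actually executed. Optional.
 */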
VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
{
    uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
    AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
    Assert(cMaxInstructions > 0);

    /*
     * See if there is an interrupt pending in TRPM, inject it if we can.
     */
    /** @todo What if we are injecting an exception and not an interrupt? Is that
     *        possible here? For now we assert it is indeed only an interrupt. */
    if (!TRPMHasTrap(pVCpu))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else
            return rcStrict;
    }

    /*
     * Initial decoder init w/ prefetch, then setup setjmp.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
    {
        pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
        IEM_TRY_SETJMP(pVCpu, rcStrict)
        {
            /*
             * The run loop. We limit ourselves to 4096 instructions right now.
             */
            uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
            PVMCC pVM = pVCpu->CTX_SUFF(pVM);
            for (;;)
            {
                /*
                 * Log the state.
                 */
#ifdef LOG_ENABLED
                iemLogCurInstr(pVCpu, "IEMExecLots");
#endif

                /*
                 * Do the decoding and emulation.
                 */
                rcStrict = iemExecDecodeAndInterpretTargetInstruction(pVCpu);
#if defined(VBOX_STRICT) && defined(VBOX_VMM_TARGET_X86)
                CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
#endif
                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                {
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                    pVCpu->iem.s.cInstructions++;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                    /* Perform any VMX nested-guest instruction boundary actions. */
                    uint64_t fCpu = pVCpu->fLocalForcedActions;
                    if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                  | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
                    { /* likely */ }
                    else
                    {
                        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
                        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                            fCpu = pVCpu->fLocalForcedActions;
                        else
                        {
                            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                            break;
                        }
                    }
#endif
                    if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
                    {
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
                        uint64_t fCpu = pVCpu->fLocalForcedActions;
#endif
                        fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                      | VMCPU_FF_TLB_FLUSH
                                                      | VMCPU_FF_UNHALT );

                        if (RT_LIKELY(   iemExecLoopTargetCheckMaskedCpuFFs(pVCpu, fCpu)
                                      && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
                        {
                            if (--cMaxInstructionsGccStupidity > 0)
                            {
                                /* Poll timers every now and then according to the caller's specs. */
                                if (   (cMaxInstructionsGccStupidity & cPollRate) != 0
                                    || !TMTimerPollBool(pVM, pVCpu))
                                {
                                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                                    iemReInitDecoder(pVCpu);
                                    continue;
                                }
                            }
                        }
                    }
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                }
                else if (pVCpu->iem.s.cActiveMappings > 0)
                    iemMemRollback(pVCpu);
                rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                break;
            }
        }
        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
        {
            if (pVCpu->iem.s.cActiveMappings > 0)
                iemMemRollback(pVCpu);
#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
#endif
            pVCpu->iem.s.cLongJumps++;
        }
        IEM_CATCH_LONGJMP_END(pVCpu);

#ifdef VBOX_STRICT
        iemInitExecTailStrictTarget(pVCpu);
#endif
    }
    else
    {
        if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        /*
         * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
         * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
         */
        rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
#endif
    }

    /*
     * Maybe re-enter raw-mode and log.
     */
#ifdef LOG_ENABLED
    if (rcStrict != VINF_SUCCESS)
        LOGFLOW_REG_STATE_EX("IEMExecLots", " - rcStrict=%Rrc", VBOXSTRICTRC_VAL(rcStrict));
#endif
    if (pcInstructions)
        *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
    return rcStrict;
}
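
/* Illustrative usage sketch (an assumption, not from this file): cPollRate is
 * used as a countdown mask, so it must be a power of two minus one, e.g.:
 *
 *     uint32_t cExecuted = 0;
 *     VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/,
 *                                         511 /*cPollRate*/, &cExecuted);
 */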


/**
 * Interface used by EMExecuteExec, does exit statistics and limits.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   fWillExit           To be defined.
 * @param   cMinInstructions    Minimum number of instructions to execute before checking for FFs.
 * @param   cMaxInstructions    Maximum number of instructions to execute.
 * @param   cMaxInstructionsWithoutExits
 *                              The max number of instructions without exits.
 * @param   pStats              Where to return statistics.
 */
VMM_INT_DECL(VBOXSTRICTRC)
IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
                uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
{
    NOREF(fWillExit); /** @todo define flexible exit crits */

    /*
     * Initialize return stats.
     */
    pStats->cInstructions    = 0;
    pStats->cExits           = 0;
    pStats->cMaxExitDistance = 0;
    pStats->cReserved        = 0;

    /*
     * Initial decoder init w/ prefetch, then setup setjmp.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
    {
        pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
        IEM_TRY_SETJMP(pVCpu, rcStrict)
        {
#ifdef IN_RING0
            bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
#endif
            uint32_t cInstructionSinceLastExit = 0;

            /*
             * The run loop. We limit ourselves to 4096 instructions right now.
             */
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            for (;;)
            {
                /*
                 * Log the state.
                 */
#ifdef LOG_ENABLED
                iemLogCurInstr(pVCpu, "IEMExecForExits");
#endif

                /*
                 * Do the decoding and emulation.
                 */
                uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;

                rcStrict = iemExecDecodeAndInterpretTargetInstruction(pVCpu);

                if (   cPotentialExits != pVCpu->iem.s.cPotentialExits
                    && cInstructionSinceLastExit > 0 /* don't count the first */ )
                {
                    pStats->cExits += 1;
                    if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
                        pStats->cMaxExitDistance = cInstructionSinceLastExit;
                    cInstructionSinceLastExit = 0;
                }

                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                {
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                    pVCpu->iem.s.cInstructions++;
                    pStats->cInstructions++;
                    cInstructionSinceLastExit++;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                    /* Perform any VMX nested-guest instruction boundary actions. */
                    uint64_t fCpu = pVCpu->fLocalForcedActions;
                    if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                  | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
                    { /* likely */ }
                    else
                    {
                        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
                        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                            fCpu = pVCpu->fLocalForcedActions;
                        else
                        {
                            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                            break;
                        }
                    }
#endif
                    if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
                    {
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
                        uint64_t fCpu = pVCpu->fLocalForcedActions;
#endif
                        fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                      | VMCPU_FF_TLB_FLUSH
                                                      | VMCPU_FF_UNHALT );
                        if (RT_LIKELY(   (   iemExecLoopTargetCheckMaskedCpuFFs(pVCpu, fCpu)
                                          && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
                                      || pStats->cInstructions < cMinInstructions))
                        {
                            if (pStats->cInstructions < cMaxInstructions)
                            {
                                if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
                                {
#ifdef IN_RING0
                                    if (   !fCheckPreemptionPending
                                        || !RTThreadPreemptIsPending(NIL_RTTHREAD))
#endif
                                    {
                                        Assert(pVCpu->iem.s.cActiveMappings == 0);
                                        iemReInitDecoder(pVCpu);
                                        continue;
                                    }
#ifdef IN_RING0
                                    rcStrict = VINF_EM_RAW_INTERRUPT;
                                    break;
#endif
                                }
                            }
                        }
                        Assert(!(fCpu & VMCPU_FF_IEM));
                    }
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                }
                else if (pVCpu->iem.s.cActiveMappings > 0)
                    iemMemRollback(pVCpu);
                rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                break;
            }
        }
        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
        {
            if (pVCpu->iem.s.cActiveMappings > 0)
                iemMemRollback(pVCpu);
            pVCpu->iem.s.cLongJumps++;
        }
        IEM_CATCH_LONGJMP_END(pVCpu);

#ifdef VBOX_STRICT
        iemInitExecTailStrictTarget(pVCpu);
#endif
    }
    else
    {
        if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        /*
         * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
         * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
         */
        rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
#endif
    }

    /*
     * Maybe re-enter raw-mode and log.
     */
#ifdef LOG_ENABLED
    if (rcStrict != VINF_SUCCESS)
        LOGFLOW_REG_STATE_EX("IEMExecForExits", " - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u",
                             VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance);
#endif
    return rcStrict;
}


/**
 * Injects a trap, fault, abort, software interrupt or external interrupt.
 *
 * The parameter list matches TRPMQueryTrapAll pretty closely.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   u8TrapNo    The trap number.
 * @param   enmType     What type is it (trap/fault/abort), software
 *                      interrupt or hardware interrupt.
 * @param   uErrCode    The error code if applicable.
 * @param   uCr2        The CR2 value if applicable.
 * @param   cbInstr     The instruction length (only relevant for
 *                      software interrupts).
 * @note    x86 specific, but difficult to move due to iemInitDecoder dep.
 */
VMM_INT_DECL(VBOXSTRICTRC)
IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2, uint8_t cbInstr)
{
#ifdef VBOX_VMM_TARGET_X86
    iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
# ifdef DBGFTRACE_ENABLED
    RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
                      u8TrapNo, enmType, uErrCode, uCr2);
# endif

    uint32_t fFlags;
    switch (enmType)
    {
        case TRPM_HARDWARE_INT:
            Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
            fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
            uErrCode = uCr2 = 0;
            break;

        case TRPM_SOFTWARE_INT:
            Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
            fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
            uErrCode = uCr2 = 0;
            break;

        case TRPM_TRAP:
        case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
            Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
            fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
            if (u8TrapNo == X86_XCPT_PF)
                fFlags |= IEM_XCPT_FLAGS_CR2;
            switch (u8TrapNo)
            {
                case X86_XCPT_DF:
                case X86_XCPT_TS:
                case X86_XCPT_NP:
                case X86_XCPT_SS:
                case X86_XCPT_PF:
                case X86_XCPT_AC:
                case X86_XCPT_GP:
                    fFlags |= IEM_XCPT_FLAGS_ERR;
                    break;
            }
            break;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

    VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);

    if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;

#else /* !VBOX_VMM_TARGET_X86 */
    RT_NOREF(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
#endif /* !VBOX_VMM_TARGET_X86 */
}


/**
 * Injects the active TRPM event.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
{
#ifndef IEM_IMPLEMENTS_TASKSWITCH
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
#else
    uint8_t     u8TrapNo;
    TRPMEVENT   enmType;
    uint32_t    uErrCode;
    RTGCUINTPTR uCr2;
    uint8_t     cbInstr;
    int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
    if (RT_FAILURE(rc))
        return rc;

    /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
     *        ICEBP \#DB injection as a special case. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (rcStrict == VINF_SVM_VMEXIT)
        rcStrict = VINF_SUCCESS;
# endif
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    if (rcStrict == VINF_VMX_VMEXIT)
        rcStrict = VINF_SUCCESS;
# endif
    /** @todo Are there any other codes that imply the event was successfully
     *        delivered to the guest? See @bugref{6607}. */
    if (   rcStrict == VINF_SUCCESS
        || rcStrict == VINF_IEM_RAISED_XCPT)
        TRPMResetTrap(pVCpu);

    return rcStrict;
#endif
}


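/**
 * Sets an IEM managed breakpoint; currently not implemented.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   pVM         The cross context VM structure.
 * @param   GCPtrBp     The guest address of the breakpoint.
 */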
VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
{
    RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    return VERR_NOT_IMPLEMENTED;
}


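/**
 * Clears an IEM managed breakpoint; currently not implemented.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   pVM         The cross context VM structure.
 * @param   GCPtrBp     The guest address of the breakpoint.
 */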
VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
{
    RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    return VERR_NOT_IMPLEMENTED;
}
