
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 108278

Last change on this file was 108278, checked in by vboxsync, 2 months ago:

VMM/IEM: Removed the #ifndef IEM_WITH_SETJMP code. We've had IEM_WITH_SETJMP defined unconditionally since 7.0 and the code probably doesn't even compile w/o it, so best remove the unused code. jiraref:VBP-1531

/* $Id: IEMAll.cpp 108278 2025-02-18 15:46:53Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - All Contexts.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/** @page pg_iem    IEM - Interpreted Execution Manager
 *
 * The interpreted execution manager (IEM) is for executing short guest code
 * sequences that are causing too many exits / virtualization traps.  It will
 * also be used to interpret single instructions, thus replacing the selective
 * interpreters in EM and IOM.
 *
 * Design goals:
 *      - Relatively small footprint, although we favour speed and correctness
 *        over size.
 *      - Reasonably fast.
 *      - Correctly handle lock prefixed instructions.
 *      - Complete instruction set - eventually.
 *      - Refactorable into a recompiler, maybe.
 *      - Replace EMInterpret*.
 *
 * Using the existing disassembler has been considered, however this is thought
 * to conflict with speed as the disassembler chews things a bit too much while
 * leaving us with a somewhat complicated state to interpret afterwards.
 *
 *
 * The current code is very much work in progress.  You've been warned!
 *
 *
 * @section sec_iem_fpu_instr   FPU Instructions
 *
 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
 * same or equivalent instructions on the host FPU.  To make life easy, we also
 * let the FPU prioritize the unmasked exceptions for us.  This, however, only
 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
 * can trigger spurious FPU exceptions.
 *
 * The guest FPU state is not loaded into the host CPU and kept there till we
 * leave IEM because the calling conventions have declared an all year open
 * season on much of the FPU state.  For instance an innocent looking call to
 * memcpy might end up using a whole bunch of XMM or MM registers if the
 * particular implementation finds it worthwhile.
 *
 *
 * @section sec_iem_logging     Logging
 *
 * The IEM code uses the "IEM" log group for the main logging. The different
 * logging levels/flags are generally used for the following purposes:
 *      - Level 1  (Log)  : Errors, exceptions, interrupts and such major events.
 *      - Flow  (LogFlow) : Basic enter/exit IEM state info.
 *      - Level 2  (Log2) : ?
 *      - Level 3  (Log3) : More detailed enter/exit IEM state info.
 *      - Level 4  (Log4) : Decoding mnemonics w/ EIP.
 *      - Level 5  (Log5) : Decoding details.
 *      - Level 6  (Log6) : Enables/disables the lockstep comparison with REM.
 *      - Level 7  (Log7) : iret++ execution logging.
 *      - Level 8  (Log8) :
 *      - Level 9  (Log9) :
 *      - Level 10 (Log10): TLBs.
 *      - Level 11 (Log11): Unmasked FPU exceptions.
 *
 * The "IEM_MEM" log group covers most of memory related details logging,
 * except for errors and exceptions:
 *      - Level 1  (Log)  : Reads.
 *      - Level 2  (Log2) : Read fallbacks.
 *      - Level 3  (Log3) : MemMap read.
 *      - Level 4  (Log4) : MemMap read fallbacks.
 *      - Level 5  (Log5) : Writes.
 *      - Level 6  (Log6) : Write fallbacks.
 *      - Level 7  (Log7) : MemMap writes and read-writes.
 *      - Level 8  (Log8) : MemMap write and read-write fallbacks.
 *      - Level 9  (Log9) : Stack reads.
 *      - Level 10 (Log10): Stack read fallbacks.
 *      - Level 11 (Log11): Stack writes.
 *      - Level 12 (Log12): Stack write fallbacks.
 *      - Flow  (LogFlow) :
 *
 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
 *      - Level 1  (Log)  : Errors and other major events.
 *      - Flow  (LogFlow) : Misc flow stuff (cleanup?)
 *      - Level 2  (Log2) : VM exits.
 *
 * The syscall logging level assignments:
 *      - Level 1: DOS and BIOS.
 *      - Level 2: Windows 3.x.
 *      - Level 3: Linux.
 */
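
/*
 * Illustration (sketch, not from the original source): a level-2 statement in
 * the "IEM" group uses the usual VBox logging macros, e.g.
 *     Log2(("iemFoo: decoding %04x:%RX64\n", uSel, uRip));
 * where iemFoo, uSel and uRip are placeholder names.  Assuming the standard
 * RTLogGroupSettings syntax (see VBox/log.h), levels 1-3 plus flow for this
 * group could then be enabled at runtime with something like:
 *     VBOX_LOG=+iem.e.l.l2.l3.f
 */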


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pdmapic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gcm.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <iprt/asm-math.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
# include <iprt/asm-arm.h>
#endif
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#ifdef VBOX_VMM_TARGET_X86
# include "target-x86/IEMInline-x86.h"
# include "target-x86/IEMInlineDecode-x86.h"
#endif



/**
 * Initializes the decoder state.
 *
 * iemReInitDecoder is mostly a copy of this function.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   fExecOpts   Optional execution flags:
 *                          - IEM_F_BYPASS_HANDLERS
 *                          - IEM_F_X86_DISREGARD_LOCK
 */
DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));

    /* Execution state: */
    uint32_t fExec;
    pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;

    /* Decoder state: */
    pVCpu->iem.s.enmDefAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    pVCpu->iem.s.enmEffAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;
    if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
    {
        pVCpu->iem.s.enmDefOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
        pVCpu->iem.s.enmEffOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;
    }
    else
    {
        pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
        pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    }
    pVCpu->iem.s.fPrefixes          = 0;
    pVCpu->iem.s.uRexReg            = 0;
    pVCpu->iem.s.uRexB              = 0;
    pVCpu->iem.s.uRexIndex          = 0;
    pVCpu->iem.s.idxPrefix          = 0;
    pVCpu->iem.s.uVex3rdReg         = 0;
    pVCpu->iem.s.uVexLength         = 0;
    pVCpu->iem.s.fEvexStuff         = 0;
    pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
#ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.pbInstrBuf         = NULL;
    pVCpu->iem.s.offInstrNextByte   = 0;
    pVCpu->iem.s.offCurInstrStart   = 0;
# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    pVCpu->iem.s.offOpcode          = 0;
# endif
# ifdef VBOX_STRICT
    pVCpu->iem.s.GCPhysInstrBuf     = NIL_RTGCPHYS;
    pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
    pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
    pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
# endif
#else
    pVCpu->iem.s.offOpcode          = 0;
    pVCpu->iem.s.cbOpcode           = 0;
#endif
    pVCpu->iem.s.offModRm           = 0;
    pVCpu->iem.s.cActiveMappings    = 0;
    pVCpu->iem.s.iNextMapping       = 0;
    pVCpu->iem.s.rcPassUp           = VINF_SUCCESS;

#ifdef DBGFTRACE_ENABLED
    switch (IEM_GET_CPU_MODE(pVCpu))
    {
        case IEMMODE_64BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
            break;
        case IEMMODE_32BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
        case IEMMODE_16BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
    }
#endif
}


/**
 * Reinitializes the decoder state for the 2nd+ loops of IEMExecLots.
 *
 * This is mostly a copy of iemInitDecoder.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
{
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));

    /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
    AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
              ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));

    IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
    pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
    pVCpu->iem.s.enmEffAddrMode     = enmMode;
    if (enmMode != IEMMODE_64BIT)
    {
        pVCpu->iem.s.enmDefOpSize   = enmMode;  /** @todo check if this is correct... */
        pVCpu->iem.s.enmEffOpSize   = enmMode;
    }
    else
    {
        pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
        pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    }
    pVCpu->iem.s.fPrefixes          = 0;
    pVCpu->iem.s.uRexReg            = 0;
    pVCpu->iem.s.uRexB              = 0;
    pVCpu->iem.s.uRexIndex          = 0;
    pVCpu->iem.s.idxPrefix          = 0;
    pVCpu->iem.s.uVex3rdReg         = 0;
    pVCpu->iem.s.uVexLength         = 0;
    pVCpu->iem.s.fEvexStuff         = 0;
    pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
#ifdef IEM_WITH_CODE_TLB
    if (pVCpu->iem.s.pbInstrBuf)
    {
        uint64_t off = (enmMode == IEMMODE_64BIT
                        ? pVCpu->cpum.GstCtx.rip
                        : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
                     - pVCpu->iem.s.uInstrBufPc;
        if (off < pVCpu->iem.s.cbInstrBufTotal)
        {
            pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
            pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
            if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
                pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
            else
                pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
        }
        else
        {
            pVCpu->iem.s.pbInstrBuf       = NULL;
            pVCpu->iem.s.offInstrNextByte = 0;
            pVCpu->iem.s.offCurInstrStart = 0;
            pVCpu->iem.s.cbInstrBuf       = 0;
            pVCpu->iem.s.cbInstrBufTotal  = 0;
            pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
        }
    }
    else
    {
        pVCpu->iem.s.offInstrNextByte = 0;
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.cbInstrBuf       = 0;
        pVCpu->iem.s.cbInstrBufTotal  = 0;
# ifdef VBOX_STRICT
        pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
# endif
    }
# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    pVCpu->iem.s.offOpcode          = 0;
# endif
#else  /* !IEM_WITH_CODE_TLB */
    pVCpu->iem.s.cbOpcode           = 0;
    pVCpu->iem.s.offOpcode          = 0;
#endif /* !IEM_WITH_CODE_TLB */
    pVCpu->iem.s.offModRm           = 0;
    Assert(pVCpu->iem.s.cActiveMappings == 0);
    pVCpu->iem.s.iNextMapping       = 0;
    Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
    Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));

#ifdef DBGFTRACE_ENABLED
    switch (enmMode)
    {
        case IEMMODE_64BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
            break;
        case IEMMODE_32BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
        case IEMMODE_16BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
    }
#endif
}


/**
 * Initializes the decoder and prefetches opcodes the first time execution is
 * started.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling thread.
 * @param   fExecOpts   Optional execution flags:
 *                          - IEM_F_BYPASS_HANDLERS
 *                          - IEM_F_X86_DISREGARD_LOCK
 */
DECLINLINE(VBOXSTRICTRC) iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
{
    iemInitDecoder(pVCpu, fExecOpts);

#ifndef IEM_WITH_CODE_TLB
    return iemOpcodeFetchPrefetch(pVCpu);
#else
    return VINF_SUCCESS;
#endif
}


#ifdef LOG_ENABLED
/**
 * Logs the current instruction.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fSameCtx    Set if we have the same context information as the VMM,
 *                      clear if we may have already executed an instruction in
 *                      our debug context.  When clear, we assume IEMCPU holds
 *                      valid CPU mode info.  The @a fSameCtx parameter is now
 *                      misleading and obsolete.
 * @param   pszFunction The IEM function doing the execution.
 */
static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
{
# ifdef IN_RING3
    if (LogIs2Enabled())
    {
        char     szInstr[256];
        uint32_t cbInstr = 0;
        if (fSameCtx)
            DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
                               DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                               szInstr, sizeof(szInstr), &cbInstr);
        else
        {
            uint32_t fFlags = 0;
            switch (IEM_GET_CPU_MODE(pVCpu))
            {
                case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
                case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
                case IEMMODE_16BIT:
                    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
                        fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
                    else
                        fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
                    break;
            }
            DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
                               szInstr, sizeof(szInstr), &cbInstr);
        }

        PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
        Log2(("**** %s fExec=%x\n"
              " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
              " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
              " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
              " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
              " %s\n"
              , pszFunction, pVCpu->iem.s.fExec,
              pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
              pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
              pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
              pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
              szInstr));

        /* This stuff sucks atm. as it fills the log with MSRs. */
        //if (LogIs3Enabled())
        //    DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
    }
    else
# endif
        LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
                 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
    RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
}
#endif /* LOG_ENABLED */


#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
 *
 * @returns Modified rcStrict.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The instruction execution status.
 */
static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
    if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
    {
        /* VMX preemption timer takes priority over NMI-window exits. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
        {
            rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
        }
        /*
         * Check remaining intercepts.
         *
         * NMI-window and Interrupt-window VM-exits.
         * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
         * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
         *
         * See Intel spec. 26.7.6 "NMI-Window Exiting".
         * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
         */
        else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
                 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
                 && !TRPMHasTrap(pVCpu))
        {
            Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
            if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
                && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
            {
                rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
                Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
            }
            else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
                     && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
            {
                rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
                Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
            }
        }
    }
    /* TPR-below threshold/APIC write has the highest priority. */
    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    {
        rcStrict = iemVmxApicWriteEmulation(pVCpu);
        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    }
    /* MTF takes priority over VMX-preemption timer. */
    else
    {
        rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    }
    return rcStrict;
}
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */


/**
 * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,
 * IEMExecOneBypass and friends.
 *
 * Similar code is found in IEMExecLots.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   fExecuteInhibit If set, execute the instruction following CLI,
 *                          POP SS and MOV SS,GR.
 * @param   pszFunction     The calling function name.
 */
DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
{
    AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    RT_NOREF_PV(pszFunction);

    VBOXSTRICTRC rcStrict;
    IEM_TRY_SETJMP(pVCpu, rcStrict)
    {
        uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
        rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    }
    IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    {
        pVCpu->iem.s.cLongJumps++;
    }
    IEM_CATCH_LONGJMP_END(pVCpu);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->iem.s.cInstructions++;
    if (pVCpu->iem.s.cActiveMappings > 0)
    {
        Assert(rcStrict != VINF_SUCCESS);
        iemMemRollback(pVCpu);
    }
    AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));

//#ifdef DEBUG
//    AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
//#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /*
     * Perform any VMX nested-guest instruction boundary actions.
     *
     * If any of these causes a VM-exit, we must skip executing the next
     * instruction (would run into stale page tables).  A VM-exit makes sure
     * there is no interrupt-inhibition, so that should ensure we don't go on
     * to try to execute the next instruction.  Clearing fExecuteInhibit is
     * problematic because of the setjmp/longjmp clobbering above.
     */
    if (   !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                     | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
        || rcStrict != VINF_SUCCESS)
    { /* likely */ }
    else
        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
#endif

    /* Execute the next instruction as well if a cli, pop ss or
       mov ss, Gr has just completed successfully. */
    if (   fExecuteInhibit
        && rcStrict == VINF_SUCCESS
        && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    {
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
        if (rcStrict == VINF_SUCCESS)
        {
#ifdef LOG_ENABLED
            iemLogCurInstr(pVCpu, false, pszFunction);
#endif
            IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
            {
                uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
                rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
            }
            IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
            {
                pVCpu->iem.s.cLongJumps++;
            }
            IEM_CATCH_LONGJMP_END(pVCpu);
            if (rcStrict == VINF_SUCCESS)
            {
                pVCpu->iem.s.cInstructions++;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                               | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
                { /* likely */ }
                else
                    rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
#endif
            }
            if (pVCpu->iem.s.cActiveMappings > 0)
            {
                Assert(rcStrict != VINF_SUCCESS);
                iemMemRollback(pVCpu);
            }
            AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
            AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
            AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
        }
        else if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);
        /** @todo drop this after we bake this change into RIP advancing. */
        CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
    }

    /*
     * Return value fiddling, statistics and sanity assertions.
     */
    rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);

    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    return rcStrict;
}


/**
 * Execute one instruction.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
{
    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
#ifdef LOG_ENABLED
    iemLogCurInstr(pVCpu, true, "IEMExecOne");
#endif

    /*
     * Do the decoding and emulation.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    if (rcStrict != VINF_SUCCESS)
        LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
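

/**
 * Executes one instruction, like IEMExecOne, but lets the caller supply the
 * opcode bytes.
 *
 * (Doc comment added for clarity; description derived from the code below.)
 * The supplied buffer is only used when it actually covers the current RIP,
 * i.e. when @a cbOpcodeBytes is non-zero and @a OpcodeBytesPC equals the
 * current CPUMCTX::rip; otherwise this behaves exactly like IEMExecOne and
 * the opcodes are prefetched the normal way.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   OpcodeBytesPC   The guest RIP that @a pvOpcodeBytes was read from.
 * @param   pvOpcodeBytes   The opcode bytes to decode from.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes.
 */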
VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
                                                        const void *pvOpcodeBytes, size_t cbOpcodeBytes)
{
    VBOXSTRICTRC rcStrict;
    if (   cbOpcodeBytes
        && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    {
        iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
#ifdef IEM_WITH_CODE_TLB
        pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
        pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
        pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.offInstrNextByte = 0;
        pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
#else
        pVCpu->iem.s.cbOpcode         = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
        memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
#endif
        rcStrict = VINF_SUCCESS;
    }
    else
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}
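

/**
 * Executes one instruction with memory access handlers bypassed
 * (IEM_F_BYPASS_HANDLERS).
 *
 * (Doc comment added for clarity; description derived from the code below.)
 * Unlike IEMExecOne, this does not execute a second instruction should the
 * first one leave an interrupt shadow behind (fExecuteInhibit is false).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */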
VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}
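

/**
 * Combination of IEMExecOneBypass and IEMExecOneWithPrefetchedByPC.
 *
 * (Doc comment added for clarity; description derived from the code below.)
 * Executes one instruction with access handlers bypassed, taking the opcode
 * bytes from the caller supplied buffer when it covers the current RIP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   OpcodeBytesPC   The guest RIP that @a pvOpcodeBytes was read from.
 * @param   pvOpcodeBytes   The opcode bytes to decode from.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes.
 */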
VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
                                                              const void *pvOpcodeBytes, size_t cbOpcodeBytes)
{
    VBOXSTRICTRC rcStrict;
    if (   cbOpcodeBytes
        && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    {
        iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
#ifdef IEM_WITH_CODE_TLB
        pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
        pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
        pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.offInstrNextByte = 0;
        pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
#else
        pVCpu->iem.s.cbOpcode         = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
        memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
#endif
        rcStrict = VINF_SUCCESS;
    }
    else
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}


/**
 * For handling split cacheline lock operations when the host has split-lock
 * detection enabled.
 *
 * This will cause the interpreter to disregard the lock prefix and implicit
 * locking (xchg).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
{
    /*
     * Do the decoding and emulation.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    if (rcStrict != VINF_SUCCESS)
        LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}


/**
 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
 * inject a pending TRPM trap.
 */
VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
{
    Assert(TRPMHasTrap(pVCpu));

    if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
        && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    {
        /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
        if (fIntrEnabled)
        {
            if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
                fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
            else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
                fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
            else
            {
                Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
                fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
            }
        }
#else
        bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
#endif
        if (fIntrEnabled)
        {
            uint8_t   u8TrapNo;
            TRPMEVENT enmType;
            uint32_t  uErrCode;
            RTGCPTR   uCr2;
            int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
            AssertRC(rc2);
            Assert(enmType == TRPM_HARDWARE_INT);
            VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);

            TRPMResetTrap(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
            /* Injecting an event may cause a VM-exit. */
            if (   rcStrict != VINF_SUCCESS
                && rcStrict != VINF_IEM_RAISED_XCPT)
                return iemExecStatusCodeFiddling(pVCpu, rcStrict);
#else
            NOREF(rcStrict);
#endif
        }
    }

    return VINF_SUCCESS;
}
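

/**
 * Executes multiple instructions, up to the given limit.
 *
 * (Doc comment added for clarity; description derived from the code below.)
 * A pending TRPM trap is injected first if possible.  The run loop then
 * executes instructions until one of them fails or needs status code
 * attention, until the force flags require servicing, until
 * @a cMaxInstructions have been executed, or until timer polling (done once
 * every @a cPollRate + 1 instructions) asks for a break.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling EMT.
 * @param   cMaxInstructions    Maximum number of instructions to execute.
 * @param   cPollRate           Timer polling rate; must be a power of two
 *                              minus one as it is used as a mask on the
 *                              instruction counter.
 * @param   pcInstructions      Where to return the number of instructions
 *                              executed.  Optional (NULL is fine).
 */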
VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
{
    uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
    AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
    Assert(cMaxInstructions > 0);

    /*
     * See if there is an interrupt pending in TRPM, inject it if we can.
     */
    /** @todo What if we are injecting an exception and not an interrupt? Is that
     *        possible here? For now we assert it is indeed only an interrupt. */
    if (!TRPMHasTrap(pVCpu))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else
            return rcStrict;
    }

    /*
     * Initial decoder init w/ prefetch, then setup setjmp.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
    {
        pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
        IEM_TRY_SETJMP(pVCpu, rcStrict)
        {
            /*
             * The run loop.  We limit ourselves to the instruction count
             * specified by the caller (cMaxInstructions).
             */
            uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
            PVMCC    pVM = pVCpu->CTX_SUFF(pVM);
            for (;;)
            {
                /*
                 * Log the state.
                 */
#ifdef LOG_ENABLED
                iemLogCurInstr(pVCpu, true, "IEMExecLots");
#endif

                /*
                 * Do the decoding and emulation.
                 */
                uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
                rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
#ifdef VBOX_STRICT
                CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
#endif
                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                {
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                    pVCpu->iem.s.cInstructions++;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                    /* Perform any VMX nested-guest instruction boundary actions. */
                    uint64_t fCpu = pVCpu->fLocalForcedActions;
                    if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                  | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
                    { /* likely */ }
                    else
                    {
                        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
                        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                            fCpu = pVCpu->fLocalForcedActions;
                        else
                        {
                            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                            break;
                        }
                    }
#endif
                    if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
                    {
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
                        uint64_t fCpu = pVCpu->fLocalForcedActions;
#endif
                        fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                      | VMCPU_FF_TLB_FLUSH
                                                      | VMCPU_FF_UNHALT );

                        if (RT_LIKELY(   (   !fCpu
                                          || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                                              && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
                                      && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
                        {
                            if (--cMaxInstructionsGccStupidity > 0)
                            {
                                /* Poll timers every now and then according to the caller's specs. */
                                if (   (cMaxInstructionsGccStupidity & cPollRate) != 0
                                    || !TMTimerPollBool(pVM, pVCpu))
                                {
                                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                                    iemReInitDecoder(pVCpu);
                                    continue;
                                }
                            }
                        }
                    }
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                }
                else if (pVCpu->iem.s.cActiveMappings > 0)
                    iemMemRollback(pVCpu);
                rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                break;
            }
        }
        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
        {
            if (pVCpu->iem.s.cActiveMappings > 0)
                iemMemRollback(pVCpu);
#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
#endif
            pVCpu->iem.s.cLongJumps++;
        }
        IEM_CATCH_LONGJMP_END(pVCpu);

        /*
         * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
         */
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    }
    else
    {
        if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        /*
         * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
         * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
         */
        rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
#endif
    }

    /*
     * Maybe re-enter raw-mode and log.
     */
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    if (pcInstructions)
        *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
    return rcStrict;
}


/**
 * Interface used by EMExecuteExec, does exit statistics and limits.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   fWillExit           To be defined.
 * @param   cMinInstructions    Minimum number of instructions to execute before checking for FFs.
 * @param   cMaxInstructions    Maximum number of instructions to execute.
 * @param   cMaxInstructionsWithoutExits
 *                              The max number of instructions without exits.
 * @param   pStats              Where to return statistics.
 */
VMM_INT_DECL(VBOXSTRICTRC)
IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
                uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
{
    NOREF(fWillExit); /** @todo define flexible exit crits */

    /*
     * Initialize return stats.
     */
    pStats->cInstructions    = 0;
    pStats->cExits           = 0;
    pStats->cMaxExitDistance = 0;
    pStats->cReserved        = 0;

    /*
     * Initial decoder init w/ prefetch, then setup setjmp.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
    {
        pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
        IEM_TRY_SETJMP(pVCpu, rcStrict)
        {
#ifdef IN_RING0
            bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
#endif
            uint32_t cInstructionSinceLastExit = 0;

            /*
             * The run loop, bounded by the instruction limits handed in by the caller.
             */
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            for (;;)
            {
                /*
                 * Log the state.
                 */
#ifdef LOG_ENABLED
                iemLogCurInstr(pVCpu, true, "IEMExecForExits");
#endif

                /*
                 * Do the decoding and emulation.
                 */
                uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;

                uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
                rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);

                if (   cPotentialExits != pVCpu->iem.s.cPotentialExits
                    && cInstructionSinceLastExit > 0 /* don't count the first */ )
                {
                    pStats->cExits += 1;
                    if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
                        pStats->cMaxExitDistance = cInstructionSinceLastExit;
                    cInstructionSinceLastExit = 0;
                }

                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                {
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                    pVCpu->iem.s.cInstructions++;
                    pStats->cInstructions++;
                    cInstructionSinceLastExit++;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                    /* Perform any VMX nested-guest instruction boundary actions. */
                    uint64_t fCpu = pVCpu->fLocalForcedActions;
                    if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                  | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
                    { /* likely */ }
                    else
                    {
                        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
                        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                            fCpu = pVCpu->fLocalForcedActions;
                        else
                        {
                            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                            break;
                        }
                    }
#endif
                    if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
                    {
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
                        uint64_t fCpu = pVCpu->fLocalForcedActions;
#endif
                        fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                      | VMCPU_FF_TLB_FLUSH
                                                      | VMCPU_FF_UNHALT );
                        if (RT_LIKELY(   (   (   !fCpu
                                              || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                                                  && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
                                          && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
                                      || pStats->cInstructions < cMinInstructions))
                        {
                            if (pStats->cInstructions < cMaxInstructions)
                            {
                                if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
                                {
#ifdef IN_RING0
                                    if (   !fCheckPreemptionPending
                                        || !RTThreadPreemptIsPending(NIL_RTTHREAD))
#endif
                                    {
                                        Assert(pVCpu->iem.s.cActiveMappings == 0);
                                        iemReInitDecoder(pVCpu);
                                        continue;
                                    }
#ifdef IN_RING0
                                    rcStrict = VINF_EM_RAW_INTERRUPT;
                                    break;
#endif
                                }
                            }
                        }
                        Assert(!(fCpu & VMCPU_FF_IEM));
                    }
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                }
                else if (pVCpu->iem.s.cActiveMappings > 0)
                    iemMemRollback(pVCpu);
                rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                break;
            }
        }
        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
        {
            if (pVCpu->iem.s.cActiveMappings > 0)
                iemMemRollback(pVCpu);
            pVCpu->iem.s.cLongJumps++;
        }
        IEM_CATCH_LONGJMP_END(pVCpu);

        /*
         * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
         */
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    }
    else
    {
        if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        /*
         * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
         * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
         */
        rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
#endif
    }

    /*
     * Maybe re-enter raw-mode and log.
     */
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
                 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
    return rcStrict;
}


/**
 * Injects a trap, fault, abort, software interrupt or external interrupt.
 *
 * The parameter list matches TRPMQueryTrapAll pretty closely.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   u8TrapNo    The trap number.
 * @param   enmType     What type is it (trap/fault/abort), software
 *                      interrupt or hardware interrupt.
 * @param   uErrCode    The error code if applicable.
 * @param   uCr2        The CR2 value if applicable.
 * @param   cbInstr     The instruction length (only relevant for
 *                      software interrupts).
 * @note    x86 specific, but difficult to move due to iemInitDecoder dep.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
                                         uint8_t cbInstr)
{
    iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
#ifdef DBGFTRACE_ENABLED
    RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
                      u8TrapNo, enmType, uErrCode, uCr2);
#endif

    uint32_t fFlags;
    switch (enmType)
    {
        case TRPM_HARDWARE_INT:
            Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
            fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
            uErrCode = uCr2 = 0;
            break;

        case TRPM_SOFTWARE_INT:
            Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
            fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
            uErrCode = uCr2 = 0;
            break;

        case TRPM_TRAP:
        case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
            Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
            fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
            if (u8TrapNo == X86_XCPT_PF)
                fFlags |= IEM_XCPT_FLAGS_CR2;
            switch (u8TrapNo)
            {
                case X86_XCPT_DF:
                case X86_XCPT_TS:
                case X86_XCPT_NP:
                case X86_XCPT_SS:
                case X86_XCPT_PF:
                case X86_XCPT_AC:
                case X86_XCPT_GP:
                    fFlags |= IEM_XCPT_FLAGS_ERR;
                    break;
            }
            break;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

    VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);

    if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}


/**
 * Injects the active TRPM event.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
{
#ifndef IEM_IMPLEMENTS_TASKSWITCH
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
#else
    uint8_t     u8TrapNo;
    TRPMEVENT   enmType;
    uint32_t    uErrCode;
    RTGCUINTPTR uCr2;
    uint8_t     cbInstr;
    int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
    if (RT_FAILURE(rc))
        return rc;

    /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
     *        ICEBP \#DB injection as a special case. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (rcStrict == VINF_SVM_VMEXIT)
        rcStrict = VINF_SUCCESS;
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    if (rcStrict == VINF_VMX_VMEXIT)
        rcStrict = VINF_SUCCESS;
#endif
    /** @todo Are there any other codes that imply the event was successfully
     *        delivered to the guest? See @bugref{6607}. */
    if (   rcStrict == VINF_SUCCESS
        || rcStrict == VINF_IEM_RAISED_XCPT)
        TRPMResetTrap(pVCpu);

    return rcStrict;
#endif
}
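

/**
 * Sets a breakpoint for IEM to monitor.
 *
 * (Doc comment added for clarity.)  This is a stub: as the body below shows,
 * the functionality has not been implemented yet.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   pVM         The cross context VM structure.
 * @param   GCPtrBp     The guest address of the breakpoint.
 */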
VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
{
    RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    return VERR_NOT_IMPLEMENTED;
}
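

/**
 * Clears a breakpoint set via IEMBreakpointSet.
 *
 * (Doc comment added for clarity.)  Like IEMBreakpointSet, this is a stub
 * that has not been implemented yet.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   pVM         The cross context VM structure.
 * @param   GCPtrBp     The guest address of the breakpoint.
 */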
VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
{
    RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    return VERR_NOT_IMPLEMENTED;
}