VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 108226

Last change on this file was r108226, checked in by vboxsync, 3 months ago

VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 194.4 KB
1/* $Id: IEMAll.cpp 108226 2025-02-14 15:54:48Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
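 *
 * As a rough, illustrative sketch only (the messages and the GCPtrMem/cbMem
 * names below are placeholders, and LOG_GROUP_IEM_MEM is assumed to be the
 * corresponding group id in VBox/log.h), code in the memory paths emits at
 * the levels listed above along these lines:
 * @code
 *     #define LOG_GROUP LOG_GROUP_IEM_MEM
 *     #include <VBox/log.h>
 *
 *     Log(("read %RGv LB %#x\n", GCPtrMem, cbMem));          // Level 1: reads.
 *     Log2(("read fallback %RGv\n", GCPtrMem));              // Level 2: read fallbacks.
 *     Log11(("stack write %RGv LB %#x\n", GCPtrMem, cbMem)); // Level 11: stack writes.
 * @endcode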
111 */
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#ifdef IN_RING0
125# define VBOX_VMM_TARGET_X86
126#endif
127#include <VBox/vmm/iem.h>
128#include <VBox/vmm/cpum.h>
129#include <VBox/vmm/pdmapic.h>
130#include <VBox/vmm/pdm.h>
131#include <VBox/vmm/pgm.h>
132#include <VBox/vmm/iom.h>
133#include <VBox/vmm/em.h>
134#include <VBox/vmm/hm.h>
135#include <VBox/vmm/nem.h>
136#include <VBox/vmm/gcm.h>
137#include <VBox/vmm/gim.h>
138#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
139# include <VBox/vmm/em.h>
140# include <VBox/vmm/hm_svm.h>
141#endif
142#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
143# include <VBox/vmm/hmvmxinline.h>
144#endif
145#include <VBox/vmm/tm.h>
146#include <VBox/vmm/dbgf.h>
147#include <VBox/vmm/dbgftrace.h>
148#include "IEMInternal.h"
149#include <VBox/vmm/vmcc.h>
150#include <VBox/log.h>
151#include <VBox/err.h>
152#include <VBox/param.h>
153#include <VBox/dis.h>
154#include <iprt/asm-math.h>
155#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
156# include <iprt/asm-amd64-x86.h>
157#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
158# include <iprt/asm-arm.h>
159#endif
160#include <iprt/assert.h>
161#include <iprt/string.h>
162#include <iprt/x86.h>
163
164#include "IEMInline.h"
165#ifdef VBOX_VMM_TARGET_X86
166# include "target-x86/IEMAllTlbInline-x86.h"
167#endif
168
169
170/*********************************************************************************************************************************
171* Global Variables *
172*********************************************************************************************************************************/
173#if defined(IEM_LOG_MEMORY_WRITES)
174/** What IEM just wrote. */
175uint8_t g_abIemWrote[256];
176/** How much IEM just wrote. */
177size_t g_cbIemWrote;
178#endif
179
180
181/**
182 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
183 * path.
184 *
185 * This will also invalidate TLB entries for any pages with active data
186 * breakpoints on them.
187 *
188 * @returns IEM_F_BRK_PENDING_XXX or zero.
189 * @param pVCpu The cross context virtual CPU structure of the
190 * calling thread.
191 *
192 * @note Don't call directly, use iemCalcExecDbgFlags instead.
193 */
194uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
195{
196 uint32_t fExec = 0;
197
198 /*
199 * Helper for invalidating the data TLB for breakpoint addresses.
200 *
201 * This is to make sure any access to the page will always trigger a TLB
202 * load for as long as the breakpoint is enabled.
203 */
204#ifdef IEM_WITH_DATA_TLB
205# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
206 RTGCPTR uTagNoRev = (a_uValue); \
207 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
208 /** @todo do large page accounting */ \
209 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
210 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
211 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
212 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
213 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
214 } while (0)
215#else
216# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
217#endif
218
219 /*
220 * Process guest breakpoints.
221 */
222#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
223 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
224 { \
225 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
226 { \
227 case X86_DR7_RW_EO: \
228 fExec |= IEM_F_PENDING_BRK_INSTR; \
229 break; \
230 case X86_DR7_RW_WO: \
231 case X86_DR7_RW_RW: \
232 fExec |= IEM_F_PENDING_BRK_DATA; \
233 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
234 break; \
235 case X86_DR7_RW_IO: \
236 fExec |= IEM_F_PENDING_BRK_X86_IO; \
237 break; \
238 } \
239 } \
240 } while (0)
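/* For example: if DR7.L0/G0 is set with R/W0 = 01b (write-only), the macro
   above sets IEM_F_PENDING_BRK_DATA and evicts the DR0 page from the data
   TLB, forcing the next access to that page through a fresh TLB load where
   the breakpoint can be matched. */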
241
242 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
243 if (fGstDr7 & X86_DR7_ENABLED_MASK)
244 {
245/** @todo extract more details here to simplify matching later. */
246#ifdef IEM_WITH_DATA_TLB
247 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
248#endif
249 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
250 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
251 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
252 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
253 }
254
255 /*
256 * Process hypervisor breakpoints.
257 */
258 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
259 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
260 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
261 {
262/** @todo extract more details here to simplify matching later. */
263 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
264 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
265 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
266 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
267 }
268
269 return fExec;
270}
271
272
273/**
274 * Initializes the decoder state.
275 *
276 * iemReInitDecoder is mostly a copy of this function.
277 *
278 * @param pVCpu The cross context virtual CPU structure of the
279 * calling thread.
280 * @param fExecOpts Optional execution flags:
281 * - IEM_F_BYPASS_HANDLERS
282 * - IEM_F_X86_DISREGARD_LOCK
283 */
284DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
285{
286 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
287 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
289 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
290 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
291 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
292 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
293 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
294 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
295 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
296
297 /* Execution state: */
298 uint32_t fExec;
299 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
300
301 /* Decoder state: */
302 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
303 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
304 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
305 {
306 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
307 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
308 }
309 else
310 {
311 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
312 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
313 }
314 pVCpu->iem.s.fPrefixes = 0;
315 pVCpu->iem.s.uRexReg = 0;
316 pVCpu->iem.s.uRexB = 0;
317 pVCpu->iem.s.uRexIndex = 0;
318 pVCpu->iem.s.idxPrefix = 0;
319 pVCpu->iem.s.uVex3rdReg = 0;
320 pVCpu->iem.s.uVexLength = 0;
321 pVCpu->iem.s.fEvexStuff = 0;
322 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
323#ifdef IEM_WITH_CODE_TLB
324 pVCpu->iem.s.pbInstrBuf = NULL;
325 pVCpu->iem.s.offInstrNextByte = 0;
326 pVCpu->iem.s.offCurInstrStart = 0;
327# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
328 pVCpu->iem.s.offOpcode = 0;
329# endif
330# ifdef VBOX_STRICT
331 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
332 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
333 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
334 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
335# endif
336#else
337 pVCpu->iem.s.offOpcode = 0;
338 pVCpu->iem.s.cbOpcode = 0;
339#endif
340 pVCpu->iem.s.offModRm = 0;
341 pVCpu->iem.s.cActiveMappings = 0;
342 pVCpu->iem.s.iNextMapping = 0;
343 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
344
345#ifdef DBGFTRACE_ENABLED
346 switch (IEM_GET_CPU_MODE(pVCpu))
347 {
348 case IEMMODE_64BIT:
349 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
350 break;
351 case IEMMODE_32BIT:
352 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
353 break;
354 case IEMMODE_16BIT:
355 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
356 break;
357 }
358#endif
359}
360
361
362/**
363 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
364 *
365 * This is mostly a copy of iemInitDecoder.
366 *
367 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
368 */
369DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
370{
371 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
372 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
373 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
374 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
375 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
376 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
377 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
378 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
379 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
380
381 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
382 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
383 ("fExec=%#x iemCalcExecFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
384
385 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
386 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
387 pVCpu->iem.s.enmEffAddrMode = enmMode;
388 if (enmMode != IEMMODE_64BIT)
389 {
390 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
391 pVCpu->iem.s.enmEffOpSize = enmMode;
392 }
393 else
394 {
395 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
396 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
397 }
398 pVCpu->iem.s.fPrefixes = 0;
399 pVCpu->iem.s.uRexReg = 0;
400 pVCpu->iem.s.uRexB = 0;
401 pVCpu->iem.s.uRexIndex = 0;
402 pVCpu->iem.s.idxPrefix = 0;
403 pVCpu->iem.s.uVex3rdReg = 0;
404 pVCpu->iem.s.uVexLength = 0;
405 pVCpu->iem.s.fEvexStuff = 0;
406 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
407#ifdef IEM_WITH_CODE_TLB
408 if (pVCpu->iem.s.pbInstrBuf)
409 {
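        /* The previous instruction buffer may still cover RIP: recompute the
           offset into it and re-derive the usable window (up to 15 opcode
           bytes ahead); if RIP has left the buffer, force a TLB reload by
           clearing pbInstrBuf below. */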
410 uint64_t off = (enmMode == IEMMODE_64BIT
411 ? pVCpu->cpum.GstCtx.rip
412 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
413 - pVCpu->iem.s.uInstrBufPc;
414 if (off < pVCpu->iem.s.cbInstrBufTotal)
415 {
416 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
417 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
418 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
419 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
420 else
421 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
422 }
423 else
424 {
425 pVCpu->iem.s.pbInstrBuf = NULL;
426 pVCpu->iem.s.offInstrNextByte = 0;
427 pVCpu->iem.s.offCurInstrStart = 0;
428 pVCpu->iem.s.cbInstrBuf = 0;
429 pVCpu->iem.s.cbInstrBufTotal = 0;
430 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
431 }
432 }
433 else
434 {
435 pVCpu->iem.s.offInstrNextByte = 0;
436 pVCpu->iem.s.offCurInstrStart = 0;
437 pVCpu->iem.s.cbInstrBuf = 0;
438 pVCpu->iem.s.cbInstrBufTotal = 0;
439# ifdef VBOX_STRICT
440 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
441# endif
442 }
443# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
444 pVCpu->iem.s.offOpcode = 0;
445# endif
446#else /* !IEM_WITH_CODE_TLB */
447 pVCpu->iem.s.cbOpcode = 0;
448 pVCpu->iem.s.offOpcode = 0;
449#endif /* !IEM_WITH_CODE_TLB */
450 pVCpu->iem.s.offModRm = 0;
451 Assert(pVCpu->iem.s.cActiveMappings == 0);
452 pVCpu->iem.s.iNextMapping = 0;
453 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
454 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
455
456#ifdef DBGFTRACE_ENABLED
457 switch (enmMode)
458 {
459 case IEMMODE_64BIT:
460 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
461 break;
462 case IEMMODE_32BIT:
463 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
464 break;
465 case IEMMODE_16BIT:
466 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
467 break;
468 }
469#endif
470}
471
472
473
474/**
475 * Prefetches opcodes the first time we start executing.
476 *
477 * @returns Strict VBox status code.
478 * @param pVCpu The cross context virtual CPU structure of the
479 * calling thread.
480 * @param fExecOpts Optional execution flags:
481 * - IEM_F_BYPASS_HANDLERS
482 * - IEM_F_X86_DISREGARD_LOCK
483 */
484static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
485{
486 iemInitDecoder(pVCpu, fExecOpts);
487
488#ifndef IEM_WITH_CODE_TLB
489 /*
490 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
491 *
492 * First translate CS:rIP to a physical address.
493 *
494 * Note! The iemOpcodeFetchMoreBytes code depends on this code to fetch
495 * all relevant bytes from the first page, as it ASSUMES it's only ever
496 * called for dealing with CS.LIM, page crossing and instructions that
497 * are too long.
498 */
499 uint32_t cbToTryRead;
500 RTGCPTR GCPtrPC;
501 if (IEM_IS_64BIT_CODE(pVCpu))
502 {
503 cbToTryRead = GUEST_PAGE_SIZE;
504 GCPtrPC = pVCpu->cpum.GstCtx.rip;
505 if (IEM_IS_CANONICAL(GCPtrPC))
506 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
507 else
508 return iemRaiseGeneralProtectionFault0(pVCpu);
509 }
510 else
511 {
512 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
513 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
514 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
515 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
516 else
517 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
518 if (cbToTryRead) { /* likely */ }
519 else /* overflowed */
520 {
521 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
522 cbToTryRead = UINT32_MAX;
523 }
524 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
525 Assert(GCPtrPC <= UINT32_MAX);
526 }
527
528 PGMPTWALKFAST WalkFast;
529 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
530 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
531 &WalkFast);
532 if (RT_SUCCESS(rc))
533 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
534 else
535 {
536 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
537# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
538/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
539 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
540 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
544 }
545#if 0
546 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
547 else
548 {
549 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
550# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
551/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
552# error completely wrong
553 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
554 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
555# endif
556 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
557 }
558 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
559 else
560 {
561 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
562# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
563/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
564# error completely wrong.
565 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
566 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
567# endif
568 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
569 }
570#else
571 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
572 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
573#endif
574 RTGCPHYS const GCPhys = WalkFast.GCPhys;
575
576 /*
577 * Read the bytes at this address.
578 */
579 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
580 if (cbToTryRead > cbLeftOnPage)
581 cbToTryRead = cbLeftOnPage;
582 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
583 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
584
585 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
586 {
587 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
588 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
589 { /* likely */ }
590 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
591 {
592 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
593 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
594 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
595 }
596 else
597 {
598 Log((RT_SUCCESS(rcStrict)
599 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
600 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
601 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
602 return rcStrict;
603 }
604 }
605 else
606 {
607 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
608 if (RT_SUCCESS(rc))
609 { /* likely */ }
610 else
611 {
612 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
613 GCPtrPC, GCPhys, cbToTryRead, rc));
614 return rc;
615 }
616 }
617 pVCpu->iem.s.cbOpcode = cbToTryRead;
618#endif /* !IEM_WITH_CODE_TLB */
619 return VINF_SUCCESS;
620}
621
622
623#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
624/**
625 * Worker for iemTlbInvalidateAll.
626 */
627template<bool a_fGlobal>
628DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
629{
630 if (!a_fGlobal)
631 pTlb->cTlsFlushes++;
632 else
633 pTlb->cTlsGlobalFlushes++;
634
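    /* Validity is revision based: an entry only matches when its uTag includes
       the current uTlbRevision (see the uTag checks in the TLB lookup code),
       so bumping the revision invalidates all non-global entries in O(1); only
       on the rare rollover below do we sweep the array and zero the tags. */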
635 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
636 if (RT_LIKELY(pTlb->uTlbRevision != 0))
637 { /* very likely */ }
638 else
639 {
640 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
641 pTlb->cTlbRevisionRollovers++;
642 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
643 while (i-- > 0)
644 pTlb->aEntries[i * 2].uTag = 0;
645 }
646
647 pTlb->cTlbNonGlobalLargePageCurLoads = 0;
648 pTlb->NonGlobalLargePageRange.uLastTag = 0;
649 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
650
651 if (a_fGlobal)
652 {
653 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
654 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
655 { /* very likely */ }
656 else
657 {
658 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
659 pTlb->cTlbRevisionRollovers++;
660 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
661 while (i-- > 0)
662 pTlb->aEntries[i * 2 + 1].uTag = 0;
663 }
664
665 pTlb->cTlbGlobalLargePageCurLoads = 0;
666 pTlb->GlobalLargePageRange.uLastTag = 0;
667 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
668 }
669}
670#endif
671
672
673/**
674 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
675 */
676template<bool a_fGlobal>
677DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
678{
679#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
680 Log10(("IEMTlbInvalidateAll\n"));
681
682# ifdef IEM_WITH_CODE_TLB
683 pVCpu->iem.s.cbInstrBufTotal = 0;
684 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
685 if (a_fGlobal)
686 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
687 else
688 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
689# endif
690
691# ifdef IEM_WITH_DATA_TLB
692 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
693 if (a_fGlobal)
694 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
695 else
696 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
697# endif
698#else
699 RT_NOREF(pVCpu);
700#endif
701}
702
703
704/**
705 * Invalidates the non-global IEM TLB entries.
706 *
707 * This is called internally as well as by PGM when moving GC mappings.
708 *
709 * @param pVCpu The cross context virtual CPU structure of the calling
710 * thread.
711 */
712VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
713{
714 iemTlbInvalidateAll<false>(pVCpu);
715}
716
717
718/**
719 * Invalidates all the IEM TLB entries, global ones included.
720 *
721 * This is called internally as well as by PGM when moving GC mappings.
722 *
723 * @param pVCpu The cross context virtual CPU structure of the calling
724 * thread.
725 */
726VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
727{
728 iemTlbInvalidateAll<true>(pVCpu);
729}
730
731
732/**
733 * Invalidates a page in the TLBs.
734 *
735 * @param pVCpu The cross context virtual CPU structure of the calling
736 * thread.
737 * @param GCPtr The address of the page to invalidate
738 * @thread EMT(pVCpu)
739 */
740VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
741{
742 IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
743#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
744 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
745 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
746 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
747 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
748
749# ifdef IEM_WITH_CODE_TLB
750 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
751# endif
752# ifdef IEM_WITH_DATA_TLB
753 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
754# endif
755#else
756 NOREF(pVCpu); NOREF(GCPtr);
757#endif
758}
759
760
761#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
762/**
763 * Invalidates both TLBs in a slow fashion following a rollover.
764 *
765 * Worker for IEMTlbInvalidateAllPhysical,
766 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
767 * iemMemMapJmp and others.
768 *
769 * @thread EMT(pVCpu)
770 */
771void iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT
772{
773 Log10(("iemTlbInvalidateAllPhysicalSlow\n"));
774 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
775 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
776
777 unsigned i;
778# ifdef IEM_WITH_CODE_TLB
779 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
780 while (i-- > 0)
781 {
782 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
783 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
784 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
785 }
786 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
787 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
788# endif
789# ifdef IEM_WITH_DATA_TLB
790 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
791 while (i-- > 0)
792 {
793 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
794 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
795 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
796 }
797 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
798 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
799# endif
800
801}
802#endif
803
804
805/**
806 * Invalidates the host physical aspects of the IEM TLBs.
807 *
808 * This is called internally as well as by PGM when moving GC mappings.
809 *
810 * @param pVCpu The cross context virtual CPU structure of the calling
811 * thread.
812 * @note Currently not used.
813 */
814VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
815{
816#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
817 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
818 Log10(("IEMTlbInvalidateAllPhysical\n"));
819
820# ifdef IEM_WITH_CODE_TLB
821 pVCpu->iem.s.cbInstrBufTotal = 0;
822# endif
823 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
824 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
825 {
826 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
827 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
828 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
829 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
830 }
831 else
832 iemTlbInvalidateAllPhysicalSlow(pVCpu);
833#else
834 NOREF(pVCpu);
835#endif
836}
837
838
839/**
840 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
841 *
842 * This is called internally as well as by PGM when moving GC mappings.
843 *
844 * @param pVM The cross context VM structure.
845 * @param idCpuCaller The ID of the calling EMT if available to the caller,
846 * otherwise NIL_VMCPUID.
847 * @param enmReason The reason we're called.
848 *
849 * @remarks Caller holds the PGM lock.
850 */
851VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
852{
853#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
854 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
855 if (pVCpuCaller)
856 VMCPU_ASSERT_EMT(pVCpuCaller);
857 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
858
859 VMCC_FOR_EACH_VMCPU(pVM)
860 {
861# ifdef IEM_WITH_CODE_TLB
862 if (pVCpuCaller == pVCpu)
863 pVCpu->iem.s.cbInstrBufTotal = 0;
864# endif
865
866 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
867 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
868 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
869 { /* likely */}
870 else if (pVCpuCaller != pVCpu)
871 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
872 else
873 {
874 iemTlbInvalidateAllPhysicalSlow(pVCpu);
875 continue;
876 }
877 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
878 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
879
880 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
881 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
882 }
883 VMCC_FOR_EACH_VMCPU_END(pVM);
884
885#else
886 RT_NOREF(pVM, idCpuCaller, enmReason);
887#endif
888}
889
890
891/**
892 * Flushes the prefetch buffer, light version.
893 */
894void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
895{
896#ifndef IEM_WITH_CODE_TLB
897 pVCpu->iem.s.cbOpcode = cbInstr;
898#else
899 RT_NOREF(pVCpu, cbInstr);
900#endif
901}
902
903
904/**
905 * Flushes the prefetch buffer, heavy version.
906 */
907void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
908{
909#ifndef IEM_WITH_CODE_TLB
910 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
911#elif 1
912 pVCpu->iem.s.cbInstrBufTotal = 0;
913 RT_NOREF(cbInstr);
914#else
915 RT_NOREF(pVCpu, cbInstr);
916#endif
917}
918
919
920
921#ifdef IEM_WITH_CODE_TLB
922
923/**
924 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
925 * failure and jumps.
926 *
927 * We end up here for a number of reasons:
928 * - pbInstrBuf isn't yet initialized.
929 * - Advancing beyond the buffer boundary (e.g. crossing a page).
930 * - Advancing beyond the CS segment limit.
931 * - Fetching from a non-mappable page (e.g. MMIO).
932 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
933 *
934 * @param pVCpu The cross context virtual CPU structure of the
935 * calling thread.
936 * @param pvDst Where to return the bytes.
937 * @param cbDst Number of bytes to read. A value of zero is
938 * allowed for initializing pbInstrBuf (the
939 * recompiler does this). In this case it is best
940 * to set pbInstrBuf to NULL prior to the call.
941 */
942void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
943{
944# ifdef IN_RING3
945 for (;;)
946 {
947 Assert(cbDst <= 8);
948 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
949
950 /*
951 * We might have a partial buffer match, deal with that first to make the
952 * rest simpler. This is the first part of the cross page/buffer case.
953 */
954 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
955 if (pbInstrBuf != NULL)
956 {
957 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
958 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
959 if (offBuf < cbInstrBuf)
960 {
961 Assert(offBuf + cbDst > cbInstrBuf);
962 uint32_t const cbCopy = cbInstrBuf - offBuf;
963 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
964
965 cbDst -= cbCopy;
966 pvDst = (uint8_t *)pvDst + cbCopy;
967 offBuf += cbCopy;
968 }
969 }
970
971 /*
972 * Check segment limit, figuring how much we're allowed to access at this point.
973 *
974 * We will fault immediately if RIP is past the segment limit / in non-canonical
975 * territory. If we do continue, there are one or more bytes to read before we
976 * end up in trouble and we need to do that first before faulting.
977 */
978 RTGCPTR GCPtrFirst;
979 uint32_t cbMaxRead;
980 if (IEM_IS_64BIT_CODE(pVCpu))
981 {
982 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
983 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
984 { /* likely */ }
985 else
986 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
987 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
988 }
989 else
990 {
991 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
992 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
993 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
994 { /* likely */ }
995 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
996 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
997 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
998 if (cbMaxRead != 0)
999 { /* likely */ }
1000 else
1001 {
1002 /* Overflowed because address is 0 and limit is max. */
1003 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1004 cbMaxRead = X86_PAGE_SIZE;
1005 }
1006 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1007 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1008 if (cbMaxRead2 < cbMaxRead)
1009 cbMaxRead = cbMaxRead2;
1010 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1011 }
1012
1013 /*
1014 * Get the TLB entry for this piece of code.
1015 */
1016 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1017 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1018 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1019 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1020 {
1021 /* likely when executing lots of code, otherwise unlikely */
1022# ifdef IEM_WITH_TLB_STATISTICS
1023 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1024# endif
1025 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1026
1027 /* Check TLB page table level access flags. */
1028 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1029 {
1030 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1031 {
1032 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1033 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1034 }
1035 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1036 {
1037 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1038 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1039 }
1040 }
1041
1042 /* Look up the physical page info if necessary. */
1043 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1044 { /* not necessary */ }
1045 else
1046 {
1047 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1048 { /* likely */ }
1049 else
1050 iemTlbInvalidateAllPhysicalSlow(pVCpu);
1051 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1052 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1053 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1054 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1055 }
1056 }
1057 else
1058 {
1059 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1060
1061 /* This page table walking will set A bits as required by the access while performing the walk.
1062 ASSUMES these are set when the address is translated rather than on commit... */
1063 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1064 PGMPTWALKFAST WalkFast;
1065 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1066 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1067 &WalkFast);
1068 if (RT_SUCCESS(rc))
1069 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1070 else
1071 {
1072# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1073 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1074 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1075# endif
1076 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1077 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1078 }
1079
1080 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1081 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1082 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1083 {
1084 pTlbe--;
1085 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1086 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1087 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1088# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
1089 else
1090 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
1091# endif
1092 }
1093 else
1094 {
1095 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1096 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1097 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1098 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1099# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
1100 else
1101 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
1102# endif
1103 }
1104 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1105 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1106 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1107 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1108 pTlbe->GCPhys = GCPhysPg;
1109 pTlbe->pbMappingR3 = NULL;
1110 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1111 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1112 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1113
1114 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
1115 IEMTLBTRACE_LOAD( pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
1116 else
1117 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
1118
1119 /* Resolve the physical address. */
1120 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1121 { /* likely */ }
1122 else
1123 iemTlbInvalidateAllPhysicalSlow(pVCpu);
1124 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1125 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1126 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1127 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1128 }
1129
1130# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1131 /*
1132 * Try do a direct read using the pbMappingR3 pointer.
1133 * Note! Do not recheck the physical TLB revision number here as we have the
1134 * wrong response to changes in the else case. If someone is updating
1135 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1136 * pretending we always won the race.
1137 */
1138 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1139 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1140 {
1141 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1142 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1143 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1144 {
1145 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1146 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1147 }
1148 else
1149 {
1150 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1151 if (cbInstr + (uint32_t)cbDst <= 15)
1152 {
1153 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1154 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1155 }
1156 else
1157 {
1158 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1159 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1160 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1161 }
1162 }
1163 if (cbDst <= cbMaxRead)
1164 {
1165 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1166# if 0 /* unused */
1167 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1168# endif
1169 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1170 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1171 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1172 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1173 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1174 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1175 else
1176 Assert(!pvDst);
1177 return;
1178 }
1179 pVCpu->iem.s.pbInstrBuf = NULL;
1180
1181 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1182 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1183 }
1184# else
1185# error "refactor as needed"
1186 /*
1187 * If there is no special read handling, we can read a bit more and
1188 * put it in the prefetch buffer.
1189 */
1190 if ( cbDst < cbMaxRead
1191 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1192 {
1193 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1194 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1195 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1196 { /* likely */ }
1197 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1198 {
1199 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1200 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1201 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1202 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1203 }
1204 else
1205 {
1206 Log((RT_SUCCESS(rcStrict)
1207 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1208 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1209 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1210 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1211 }
1212 }
1213# endif
1214 /*
1215 * Special read handling, so only read exactly what's needed.
1216 * This is a highly unlikely scenario.
1217 */
1218 else
1219 {
1220 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1221
1222 /* Check instruction length. */
1223 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1224 if (RT_LIKELY(cbInstr + cbDst <= 15))
1225 { /* likely */ }
1226 else
1227 {
1228 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1229 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1230 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1231 }
1232
1233 /* Do the reading. */
1234 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1235 if (cbToRead > 0)
1236 {
1237 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1238 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1239 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1240 { /* likely */ }
1241 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1242 {
1243 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1244 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1245 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1246 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1247 }
1248 else
1249 {
1250 Log((RT_SUCCESS(rcStrict)
1251 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1252 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1253 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1254 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1255 }
1256 }
1257
1258 /* Update the state and probably return. */
1259 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1260 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1261# if 0 /* unused */
1262 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1263# endif
1264 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1265 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1266 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1267 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1268 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1269 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1270 pVCpu->iem.s.pbInstrBuf = NULL;
1271 if (cbToRead == cbDst)
1272 return;
1273 Assert(cbToRead == cbMaxRead);
1274 }
1275
1276 /*
1277 * More to read, loop.
1278 */
1279 cbDst -= cbMaxRead;
1280 pvDst = (uint8_t *)pvDst + cbMaxRead;
1281 }
1282# else /* !IN_RING3 */
1283 RT_NOREF(pvDst, cbDst);
1284 if (pvDst || cbDst)
1285 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1286# endif /* !IN_RING3 */
1287}
1288
1289#else /* !IEM_WITH_CODE_TLB */
1290
1291/**
1292 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1293 * exception if it fails.
1294 *
1295 * @returns Strict VBox status code.
1296 * @param pVCpu The cross context virtual CPU structure of the
1297 * calling thread.
1298 * @param cbMin The minimum number of bytes relative to offOpcode
1299 * that must be read.
1300 */
1301VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1302{
1303 /*
1304 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1305 *
1306 * First translate CS:rIP to a physical address.
1307 */
1308 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1309 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1310 uint8_t const cbLeft = cbOpcode - offOpcode;
1311 Assert(cbLeft < cbMin);
1312 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1313
1314 uint32_t cbToTryRead;
1315 RTGCPTR GCPtrNext;
1316 if (IEM_IS_64BIT_CODE(pVCpu))
1317 {
1318 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1319 if (!IEM_IS_CANONICAL(GCPtrNext))
1320 return iemRaiseGeneralProtectionFault0(pVCpu);
1321 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1322 }
1323 else
1324 {
1325 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1326 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1327 GCPtrNext32 += cbOpcode;
1328 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1329 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1330 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1331 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1332 if (!cbToTryRead) /* overflowed */
1333 {
1334 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1335 cbToTryRead = UINT32_MAX;
1336 /** @todo check out wrapping around the code segment. */
1337 }
1338 if (cbToTryRead < cbMin - cbLeft)
1339 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1340 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1341
1342 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1343 if (cbToTryRead > cbLeftOnPage)
1344 cbToTryRead = cbLeftOnPage;
1345 }
1346
1347 /* Restrict to opcode buffer space.
1348
1349 We're making ASSUMPTIONS here based on work done previously in
1350 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1351 be fetched in case of an instruction crossing two pages. */
1352 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1353 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1354 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1355 { /* likely */ }
1356 else
1357 {
1358 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1359 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1360 return iemRaiseGeneralProtectionFault0(pVCpu);
1361 }
1362
1363 PGMPTWALKFAST WalkFast;
1364 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1365 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1366 &WalkFast);
1367 if (RT_SUCCESS(rc))
1368 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1369 else
1370 {
1371 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1372#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1373 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1374 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1375#endif
1376 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1377 }
1378 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1379 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1380
1381 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1382 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1383
1384 /*
1385 * Read the bytes at this address.
1386 *
1387 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1388 * and since PATM should only patch the start of an instruction there
1389 * should be no need to check again here.
1390 */
1391 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1392 {
1393 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1394 cbToTryRead, PGMACCESSORIGIN_IEM);
1395 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1396 { /* likely */ }
1397 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1398 {
1399 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1400 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1401 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1402 }
1403 else
1404 {
1405 Log((RT_SUCCESS(rcStrict)
1406 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1407 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1408 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1409 return rcStrict;
1410 }
1411 }
1412 else
1413 {
1414 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1415 if (RT_SUCCESS(rc))
1416 { /* likely */ }
1417 else
1418 {
1419 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1420 return rc;
1421 }
1422 }
1423 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1424 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1425
1426 return VINF_SUCCESS;
1427}
1428
1429#endif /* !IEM_WITH_CODE_TLB */
1430#ifndef IEM_WITH_SETJMP
1431
1432/**
1433 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1434 *
1435 * @returns Strict VBox status code.
1436 * @param pVCpu The cross context virtual CPU structure of the
1437 * calling thread.
1438 * @param pb Where to return the opcode byte.
1439 */
1440VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1441{
1442 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1443 if (rcStrict == VINF_SUCCESS)
1444 {
1445 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1446 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1447 pVCpu->iem.s.offOpcode = offOpcode + 1;
1448 }
1449 else
1450 *pb = 0;
1451 return rcStrict;
1452}
1453
1454#else /* IEM_WITH_SETJMP */
1455
1456/**
1457 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1458 *
1459 * @returns The opcode byte.
1460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1461 */
1462uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1463{
1464# ifdef IEM_WITH_CODE_TLB
1465 uint8_t u8;
1466 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1467 return u8;
1468# else
1469 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1470 if (rcStrict == VINF_SUCCESS)
1471 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1472 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1473# endif
1474}
1475
1476#endif /* IEM_WITH_SETJMP */
1477
1478#ifndef IEM_WITH_SETJMP
1479
1480/**
1481 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
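 *
 * Note that the (int8_t) cast below is what provides the sign extension: the
 * widening of the signed byte to the 16-bit destination is sign-preserving,
 * which is what S8Sx operands (e.g. rel8 style immediates) require.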
1482 *
1483 * @returns Strict VBox status code.
1484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1485 * @param pu16 Where to return the opcode word.
1486 */
1487VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1488{
1489 uint8_t u8;
1490 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1491 if (rcStrict == VINF_SUCCESS)
1492 *pu16 = (int8_t)u8;
1493 return rcStrict;
1494}
1495
1496
1497/**
1498 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1499 *
1500 * @returns Strict VBox status code.
1501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1502 * @param pu32 Where to return the opcode dword.
1503 */
1504VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1505{
1506 uint8_t u8;
1507 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1508 if (rcStrict == VINF_SUCCESS)
1509 *pu32 = (int8_t)u8;
1510 return rcStrict;
1511}
1512
1513
1514/**
1515 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1516 *
1517 * @returns Strict VBox status code.
1518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1519 * @param pu64 Where to return the opcode qword.
1520 */
1521VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1522{
1523 uint8_t u8;
1524 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1525 if (rcStrict == VINF_SUCCESS)
1526 *pu64 = (int8_t)u8;
1527 return rcStrict;
1528}
1529
1530#endif /* !IEM_WITH_SETJMP */
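/*
 * The S8Sx helpers above sign-extend an 8-bit opcode byte into the full
 * operand width, which is what the short immediate forms (JMP rel8,
 * ADD r/m32, imm8 and friends) rely on.  For example the byte 0xF0 (-16)
 * becomes 0xFFF0 as a word and 0xFFFFFFF0 as a dword through the (int8_t)
 * cast before widening.
 */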
1531
1532
1533#ifndef IEM_WITH_SETJMP
1534
1535/**
1536 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1537 *
1538 * @returns Strict VBox status code.
1539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1540 * @param pu16 Where to return the opcode word.
1541 */
1542VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1543{
1544 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1545 if (rcStrict == VINF_SUCCESS)
1546 {
1547 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1548# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1549 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1550# else
1551 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1552# endif
1553 pVCpu->iem.s.offOpcode = offOpcode + 2;
1554 }
1555 else
1556 *pu16 = 0;
1557 return rcStrict;
1558}
1559
1560#else /* IEM_WITH_SETJMP */
1561
1562/**
1563 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1564 *
1565 * @returns The opcode word.
1566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1567 */
1568uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1569{
1570# ifdef IEM_WITH_CODE_TLB
1571 uint16_t u16;
1572 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1573 return u16;
1574# else
1575 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1576 if (rcStrict == VINF_SUCCESS)
1577 {
1578 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1579 pVCpu->iem.s.offOpcode += 2;
1580# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1581 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1582# else
1583 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1584# endif
1585 }
1586 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1587# endif
1588}
1589
1590#endif /* IEM_WITH_SETJMP */
1591
1592#ifndef IEM_WITH_SETJMP
1593
1594/**
1595 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1596 *
1597 * @returns Strict VBox status code.
1598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1599 * @param pu32 Where to return the opcode double word.
1600 */
1601VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1602{
1603 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1604 if (rcStrict == VINF_SUCCESS)
1605 {
1606 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1607 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1608 pVCpu->iem.s.offOpcode = offOpcode + 2;
1609 }
1610 else
1611 *pu32 = 0;
1612 return rcStrict;
1613}
1614
1615
1616/**
1617 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1618 *
1619 * @returns Strict VBox status code.
1620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1621 * @param pu64 Where to return the opcode quad word.
1622 */
1623VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1624{
1625 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1626 if (rcStrict == VINF_SUCCESS)
1627 {
1628 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1629 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1630 pVCpu->iem.s.offOpcode = offOpcode + 2;
1631 }
1632 else
1633 *pu64 = 0;
1634 return rcStrict;
1635}
1636
1637#endif /* !IEM_WITH_SETJMP */
1638
1639#ifndef IEM_WITH_SETJMP
1640
1641/**
1642 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1643 *
1644 * @returns Strict VBox status code.
1645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1646 * @param pu32 Where to return the opcode dword.
1647 */
1648VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1649{
1650 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1651 if (rcStrict == VINF_SUCCESS)
1652 {
1653 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1654# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1655 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1656# else
1657 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1658 pVCpu->iem.s.abOpcode[offOpcode + 1],
1659 pVCpu->iem.s.abOpcode[offOpcode + 2],
1660 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1661# endif
1662 pVCpu->iem.s.offOpcode = offOpcode + 4;
1663 }
1664 else
1665 *pu32 = 0;
1666 return rcStrict;
1667}
1668
1669#else /* IEM_WITH_SETJMP */
1670
1671/**
1672 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1673 *
1674 * @returns The opcode dword.
1675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1676 */
1677uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1678{
1679# ifdef IEM_WITH_CODE_TLB
1680 uint32_t u32;
1681 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1682 return u32;
1683# else
1684 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1685 if (rcStrict == VINF_SUCCESS)
1686 {
1687 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1688 pVCpu->iem.s.offOpcode = offOpcode + 4;
1689# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1690 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1691# else
1692 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1693 pVCpu->iem.s.abOpcode[offOpcode + 1],
1694 pVCpu->iem.s.abOpcode[offOpcode + 2],
1695 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1696# endif
1697 }
1698 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1699# endif
1700}
1701
1702#endif /* IEM_WITH_SETJMP */
1703
1704#ifndef IEM_WITH_SETJMP
1705
1706/**
1707 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1708 *
1709 * @returns Strict VBox status code.
1710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1711 * @param pu64 Where to return the opcode qword.
1712 */
1713VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1714{
1715 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1716 if (rcStrict == VINF_SUCCESS)
1717 {
1718 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1719 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1720 pVCpu->iem.s.abOpcode[offOpcode + 1],
1721 pVCpu->iem.s.abOpcode[offOpcode + 2],
1722 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1723 pVCpu->iem.s.offOpcode = offOpcode + 4;
1724 }
1725 else
1726 *pu64 = 0;
1727 return rcStrict;
1728}
1729
1730
1731/**
1732 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1733 *
1734 * @returns Strict VBox status code.
1735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1736 * @param pu64 Where to return the opcode qword.
1737 */
1738VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1739{
1740 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1741 if (rcStrict == VINF_SUCCESS)
1742 {
1743 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1744 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1745 pVCpu->iem.s.abOpcode[offOpcode + 1],
1746 pVCpu->iem.s.abOpcode[offOpcode + 2],
1747 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1748 pVCpu->iem.s.offOpcode = offOpcode + 4;
1749 }
1750 else
1751 *pu64 = 0;
1752 return rcStrict;
1753}
1754
1755#endif /* !IEM_WITH_SETJMP */
1756
1757#ifndef IEM_WITH_SETJMP
1758
1759/**
1760 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1761 *
1762 * @returns Strict VBox status code.
1763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1764 * @param pu64 Where to return the opcode qword.
1765 */
1766VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1767{
1768 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1769 if (rcStrict == VINF_SUCCESS)
1770 {
1771 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1772# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1773 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1774# else
1775 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1776 pVCpu->iem.s.abOpcode[offOpcode + 1],
1777 pVCpu->iem.s.abOpcode[offOpcode + 2],
1778 pVCpu->iem.s.abOpcode[offOpcode + 3],
1779 pVCpu->iem.s.abOpcode[offOpcode + 4],
1780 pVCpu->iem.s.abOpcode[offOpcode + 5],
1781 pVCpu->iem.s.abOpcode[offOpcode + 6],
1782 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1783# endif
1784 pVCpu->iem.s.offOpcode = offOpcode + 8;
1785 }
1786 else
1787 *pu64 = 0;
1788 return rcStrict;
1789}
1790
1791#else /* IEM_WITH_SETJMP */
1792
1793/**
1794 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1795 *
1796 * @returns The opcode qword.
1797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1798 */
1799uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1800{
1801# ifdef IEM_WITH_CODE_TLB
1802 uint64_t u64;
1803 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1804 return u64;
1805# else
1806 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1807 if (rcStrict == VINF_SUCCESS)
1808 {
1809 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1810 pVCpu->iem.s.offOpcode = offOpcode + 8;
1811# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1812 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1813# else
1814 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1815 pVCpu->iem.s.abOpcode[offOpcode + 1],
1816 pVCpu->iem.s.abOpcode[offOpcode + 2],
1817 pVCpu->iem.s.abOpcode[offOpcode + 3],
1818 pVCpu->iem.s.abOpcode[offOpcode + 4],
1819 pVCpu->iem.s.abOpcode[offOpcode + 5],
1820 pVCpu->iem.s.abOpcode[offOpcode + 6],
1821 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1822# endif
1823 }
1824 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1825# endif
1826}
1827
1828#endif /* IEM_WITH_SETJMP */
1829
1830
1831
1832/** @name Register Access.
1833 * @{
1834 */
1835
1836/**
1837 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
1838 *
1839 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1840 * segment limit.
1841 *
 * @returns Strict VBox status code.
1842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1843 * @param cbInstr Instruction size.
1844 * @param offNextInstr The offset of the next instruction.
1845 * @param enmEffOpSize Effective operand size.
1846 */
1847VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
1848 IEMMODE enmEffOpSize) RT_NOEXCEPT
1849{
1850 switch (enmEffOpSize)
1851 {
1852 case IEMMODE_16BIT:
1853 {
1854 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
1855 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
1856 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
1857 pVCpu->cpum.GstCtx.rip = uNewIp;
1858 else
1859 return iemRaiseGeneralProtectionFault0(pVCpu);
1860 break;
1861 }
1862
1863 case IEMMODE_32BIT:
1864 {
1865 Assert(!IEM_IS_64BIT_CODE(pVCpu));
1866 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
1867
1868 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
1869 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
1870 pVCpu->cpum.GstCtx.rip = uNewEip;
1871 else
1872 return iemRaiseGeneralProtectionFault0(pVCpu);
1873 break;
1874 }
1875
1876 case IEMMODE_64BIT:
1877 {
1878 Assert(IEM_IS_64BIT_CODE(pVCpu));
1879
1880 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
1881 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
1882 pVCpu->cpum.GstCtx.rip = uNewRip;
1883 else
1884 return iemRaiseGeneralProtectionFault0(pVCpu);
1885 break;
1886 }
1887
1888 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1889 }
1890
1891#ifndef IEM_WITH_CODE_TLB
1892 /* Flush the prefetch buffer. */
1893 pVCpu->iem.s.cbOpcode = cbInstr;
1894#endif
1895
1896 /*
1897 * Clear RF and finish the instruction (maybe raise #DB).
1898 */
1899 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
1900}
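/*
 * Rough calling sketch for the relative jump finishers (the concrete values
 * are made up for illustration): a 2-byte JMP rel8 (opcode 0xEB) decoded with
 * a 16-bit operand size ends up here roughly like this:
 */
#if 0 /* illustration only */
    /* cbInstr = 2 (opcode + imm8); offNextInstr = the sign-extended imm8, here -2 = jump-to-self. */
    return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, 2 /*cbInstr*/, (int8_t)-2 /*offNextInstr*/, IEMMODE_16BIT);
#endif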
1901
1902
1903/**
1904 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
1905 *
1906 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1907 * segment limit.
1908 *
1909 * @returns Strict VBox status code.
1910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1911 * @param cbInstr Instruction size.
1912 * @param offNextInstr The offset of the next instruction.
1913 */
1914VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
1915{
1916 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
1917
1918 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
1919 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
1920 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
1921 pVCpu->cpum.GstCtx.rip = uNewIp;
1922 else
1923 return iemRaiseGeneralProtectionFault0(pVCpu);
1924
1925#ifndef IEM_WITH_CODE_TLB
1926 /* Flush the prefetch buffer. */
1927 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
1928#endif
1929
1930 /*
1931 * Clear RF and finish the instruction (maybe raise #DB).
1932 */
1933 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
1934}
1935
1936
1937/**
1938 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
1939 *
1940 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
1941 * segment limit.
1942 *
1943 * @returns Strict VBox status code.
1944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1945 * @param cbInstr Instruction size.
1946 * @param offNextInstr The offset of the next instruction.
1947 * @param enmEffOpSize Effective operand size.
1948 */
1949VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
1950 IEMMODE enmEffOpSize) RT_NOEXCEPT
1951{
1952 if (enmEffOpSize == IEMMODE_32BIT)
1953 {
1954 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
1955
1956 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
1957 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
1958 pVCpu->cpum.GstCtx.rip = uNewEip;
1959 else
1960 return iemRaiseGeneralProtectionFault0(pVCpu);
1961 }
1962 else
1963 {
1964 Assert(enmEffOpSize == IEMMODE_64BIT);
1965
1966 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
1967 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
1968 pVCpu->cpum.GstCtx.rip = uNewRip;
1969 else
1970 return iemRaiseGeneralProtectionFault0(pVCpu);
1971 }
1972
1973#ifndef IEM_WITH_CODE_TLB
1974 /* Flush the prefetch buffer. */
1975 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
1976#endif
1977
1978 /*
1979 * Clear RF and finish the instruction (maybe raise #DB).
1980 */
1981 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
1982}
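/*
 * Note that the 16-bit forms above compute uNewIp in uint16_t arithmetic, so
 * IP wraps at 0xFFFF just as a 16-bit operand size jump does on real
 * hardware: e.g. ip=0xFFF0, cbInstr=2 and offNextInstr=+0x20 yields 0x10012,
 * which is truncated to uNewIp=0x0012 before the CS limit check.
 */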
1983
1984/** @} */
1985
1986
1987/** @name Memory access.
1988 *
1989 * @{
1990 */
1991
1992#undef LOG_GROUP
1993#define LOG_GROUP LOG_GROUP_IEM_MEM
1994
1995#if 0 /*unused*/
1996/**
1997 * Looks up a memory mapping entry.
1998 *
1999 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
2000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2001 * @param pvMem The memory address.
2002 * @param fAccess The access type/what flags to match.
2003 */
2004DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
2005{
2006 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
2007 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
2008 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
2009 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2010 return 0;
2011 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
2012 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2013 return 1;
2014 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
2015 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
2016 return 2;
2017 return VERR_NOT_FOUND;
2018}
2019#endif
2020
2021/**
2022 * Finds a free memmap entry when the iNextMapping hint doesn't work.
2023 *
2024 * @returns Memory mapping index, 1024 on failure.
2025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2026 */
2027static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
2028{
2029 /*
2030 * The easy case.
2031 */
2032 if (pVCpu->iem.s.cActiveMappings == 0)
2033 {
2034 pVCpu->iem.s.iNextMapping = 1;
2035 return 0;
2036 }
2037
2038 /* There should be enough mappings for all instructions. */
2039 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
2040
2041 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
2042 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
2043 return i;
2044
2045 AssertFailedReturn(1024);
2046}
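/*
 * aMemMappings[] only has a few slots (the unused lookup helper above checks
 * indexes 0..2), sized for the worst case a single instruction can need, so
 * running out of entries is treated as an internal error here: 1024 is simply
 * an index that can never be valid, which callers can assert on cheaply.
 */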
2047
2048
2049/**
2050 * Commits a bounce buffer that needs writing back and unmaps it.
2051 *
2052 * @returns Strict VBox status code.
2053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2054 * @param iMemMap The index of the buffer to commit.
2055 * @param fPostponeFail Whether we can postpone write failures to ring-3.
2056 * Always false in ring-3, obviously.
2057 */
2058static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
2059{
2060 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
2061 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
2062#ifdef IN_RING3
2063 Assert(!fPostponeFail);
2064 RT_NOREF_PV(fPostponeFail);
2065#endif
2066
2067 /*
2068 * Do the writing.
2069 */
2070 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2071 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
2072 {
2073 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
2074 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
2075 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
2076 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
2077 {
2078 /*
2079 * Carefully and efficiently dealing with access handler return
2080 * codes makes this a little bloated.
2081 */
2082 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
2083 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
2084 pbBuf,
2085 cbFirst,
2086 PGMACCESSORIGIN_IEM);
2087 if (rcStrict == VINF_SUCCESS)
2088 {
2089 if (cbSecond)
2090 {
2091 rcStrict = PGMPhysWrite(pVM,
2092 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
2093 pbBuf + cbFirst,
2094 cbSecond,
2095 PGMACCESSORIGIN_IEM);
2096 if (rcStrict == VINF_SUCCESS)
2097 { /* nothing */ }
2098 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2099 {
2100 LogEx(LOG_GROUP_IEM,
2101 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
2102 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
2103 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
2104 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2105 }
2106#ifndef IN_RING3
2107 else if (fPostponeFail)
2108 {
2109 LogEx(LOG_GROUP_IEM,
2110 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
2111 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
2112 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
2113 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
2114 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
2115 return iemSetPassUpStatus(pVCpu, rcStrict);
2116 }
2117#endif
2118 else
2119 {
2120 LogEx(LOG_GROUP_IEM,
2121 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
2122 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
2123 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
2124 return rcStrict;
2125 }
2126 }
2127 }
2128 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2129 {
2130 if (!cbSecond)
2131 {
2132 LogEx(LOG_GROUP_IEM,
2133 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
2134 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
2135 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2136 }
2137 else
2138 {
2139 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
2140 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
2141 pbBuf + cbFirst,
2142 cbSecond,
2143 PGMACCESSORIGIN_IEM);
2144 if (rcStrict2 == VINF_SUCCESS)
2145 {
2146 LogEx(LOG_GROUP_IEM,
2147 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
2148 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
2149 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
2150 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2151 }
2152 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2153 {
2154 LogEx(LOG_GROUP_IEM,
2155 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
2156 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
2157 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
2158 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2159 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2160 }
2161#ifndef IN_RING3
2162 else if (fPostponeFail)
2163 {
2164 LogEx(LOG_GROUP_IEM,
2165 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
2166 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
2167 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
2168 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
2169 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
2170 return iemSetPassUpStatus(pVCpu, rcStrict);
2171 }
2172#endif
2173 else
2174 {
2175 LogEx(LOG_GROUP_IEM,
2176 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
2177 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
2178 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
2179 return rcStrict2;
2180 }
2181 }
2182 }
2183#ifndef IN_RING3
2184 else if (fPostponeFail)
2185 {
2186 LogEx(LOG_GROUP_IEM,
2187 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
2188 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
2189 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
2190 if (!cbSecond)
2191 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
2192 else
2193 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
2194 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
2195 return iemSetPassUpStatus(pVCpu, rcStrict);
2196 }
2197#endif
2198 else
2199 {
2200 LogEx(LOG_GROUP_IEM,
2201 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
2202 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
2203 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
2204 return rcStrict;
2205 }
2206 }
2207 else
2208 {
2209 /*
2210 * No access handlers, much simpler.
2211 */
2212 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
2213 if (RT_SUCCESS(rc))
2214 {
2215 if (cbSecond)
2216 {
2217 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
2218 if (RT_SUCCESS(rc))
2219 { /* likely */ }
2220 else
2221 {
2222 LogEx(LOG_GROUP_IEM,
2223 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
2224 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
2225 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
2226 return rc;
2227 }
2228 }
2229 }
2230 else
2231 {
2232 LogEx(LOG_GROUP_IEM,
2233 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
2234 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
2235 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
2236 return rc;
2237 }
2238 }
2239 }
2240
2241#if defined(IEM_LOG_MEMORY_WRITES)
2242 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
2243 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
2244 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
2245 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
2246 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
2247 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
2248
2249 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
2250 g_cbIemWrote = cbWrote;
2251 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
2252#endif
2253
2254 /*
2255 * Free the mapping entry.
2256 */
2257 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2258 Assert(pVCpu->iem.s.cActiveMappings != 0);
2259 pVCpu->iem.s.cActiveMappings--;
2260 return VINF_SUCCESS;
2261}
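/*
 * The commit above may need two PGMPhysWrite calls, one per guest page of a
 * cross-page bounce buffer, and each of them can return an informational
 * status that has to be merged (PGM_PHYS_RW_DO_UPDATE_STRICT_RC) and passed
 * up via iemSetPassUpStatus.  With fPostponeFail set (never in ring-3) a
 * failing write does not fail the instruction; the mapping keeps an
 * IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND mark and VMCPU_FF_IEM is raised so the
 * write can be redone once execution is back in ring-3.
 */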
2262
2263
2264/**
2265 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
2266 * @todo duplicated
2267 */
2268DECL_FORCE_INLINE(uint32_t)
2269iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
2270{
2271 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
2272 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2273 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
2274 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
2275}
2276
2277
2278/**
2279 * iemMemMap worker that deals with a request crossing pages.
2280 */
2281VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
2282 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT
2283{
2284 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
2285 Assert(cbMem <= GUEST_PAGE_SIZE);
2286
2287 /*
2288 * Do the address translations.
2289 */
2290 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
2291 RTGCPHYS GCPhysFirst;
2292 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
2293 if (rcStrict != VINF_SUCCESS)
2294 return rcStrict;
2295 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
2296
2297 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
2298 RTGCPHYS GCPhysSecond;
2299 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
2300 cbSecondPage, fAccess, &GCPhysSecond);
2301 if (rcStrict != VINF_SUCCESS)
2302 return rcStrict;
2303 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
2304 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
2305
2306 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2307
2308 /*
2309 * Check for data breakpoints.
2310 */
2311 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
2312 { /* likely */ }
2313 else
2314 {
2315 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
2316 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
2317 cbSecondPage, fAccess);
2318 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
2319 if (fDataBps > 1)
2320 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
2321 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2322 }
2323
2324 /*
2325 * Read in the current memory content if it's a read, execute or partial
2326 * write access.
2327 */
2328 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
2329
2330 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
2331 {
2332 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
2333 {
2334 /*
2335 * Must carefully deal with access handler status codes here,
2336 * which makes the code a bit bloated.
2337 */
2338 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
2339 if (rcStrict == VINF_SUCCESS)
2340 {
2341 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
2342 if (rcStrict == VINF_SUCCESS)
2343 { /*likely */ }
2344 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2345 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2346 else
2347 {
2348 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
2349 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
2350 return rcStrict;
2351 }
2352 }
2353 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2354 {
2355 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
2356 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2357 {
2358 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2359 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2360 }
2361 else
2362 {
2363 LogEx(LOG_GROUP_IEM,
2364 ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
2365 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
2366 return rcStrict2;
2367 }
2368 }
2369 else
2370 {
2371 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
2372 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
2373 return rcStrict;
2374 }
2375 }
2376 else
2377 {
2378 /*
2379 * No informational status codes here, much more straightforward.
2380 */
2381 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
2382 if (RT_SUCCESS(rc))
2383 {
2384 Assert(rc == VINF_SUCCESS);
2385 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
2386 if (RT_SUCCESS(rc))
2387 Assert(rc == VINF_SUCCESS);
2388 else
2389 {
2390 LogEx(LOG_GROUP_IEM,
2391 ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
2392 return rc;
2393 }
2394 }
2395 else
2396 {
2397 LogEx(LOG_GROUP_IEM,
2398 ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
2399 return rc;
2400 }
2401 }
2402 }
2403#ifdef VBOX_STRICT
2404 else
2405 memset(pbBuf, 0xcc, cbMem);
2406 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
2407 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
2408#endif
2409 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
2410
2411 /*
2412 * Commit the bounce buffer entry.
2413 */
2414 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
2415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
2416 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
2417 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
2418 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
2419 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
2420 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
2421 pVCpu->iem.s.iNextMapping = iMemMap + 1;
2422 pVCpu->iem.s.cActiveMappings++;
2423
2424 *ppvMem = pbBuf;
2425 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
2426 return VINF_SUCCESS;
2427}
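/*
 * Worked example of the page split above (numbers made up for illustration):
 * an 8 byte access at GCPtrFirst = 0x00081ffc with 4 KiB guest pages gives
 * cbFirstPage = 0x1000 - 0xffc = 4 bytes from the first page and
 * cbSecondPage = 8 - 4 = 4 bytes from the start of the second page; both
 * reads land back to back in aBounceBuffers[iMemMap].ab[], so the caller sees
 * one contiguous buffer despite the two physical locations.
 */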
2428
2429
2430/**
2431 * iemMemMap worker that deals with iemMemPageMap failures.
2432 */
2433VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
2434 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT
2435{
2436 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
2437
2438 /*
2439 * Filter out conditions we can handle and the ones which shouldn't happen.
2440 */
2441 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
2442 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
2443 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
2444 {
2445 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
2446 return rcMap;
2447 }
2448 pVCpu->iem.s.cPotentialExits++;
2449
2450 /*
2451 * Read in the current memory content if it's a read, execute or partial
2452 * write access.
2453 */
2454 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
2455 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
2456 {
2457 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
2458 memset(pbBuf, 0xff, cbMem);
2459 else
2460 {
2461 int rc;
2462 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
2463 {
2464 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
2465 if (rcStrict == VINF_SUCCESS)
2466 { /* nothing */ }
2467 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2468 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2469 else
2470 {
2471 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
2472 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
2473 return rcStrict;
2474 }
2475 }
2476 else
2477 {
2478 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
2479 if (RT_SUCCESS(rc))
2480 { /* likely */ }
2481 else
2482 {
2483 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
2484 GCPhysFirst, rc));
2485 return rc;
2486 }
2487 }
2488 }
2489 }
2490#ifdef VBOX_STRICT
2491 else
2492 memset(pbBuf, 0xcc, cbMem);
2493#endif
2494#ifdef VBOX_STRICT
2495 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
2496 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
2497#endif
2498
2499 /*
2500 * Commit the bounce buffer entry.
2501 */
2502 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
2503 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
2504 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
2505 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
2506 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
2507 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
2508 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
2509 pVCpu->iem.s.iNextMapping = iMemMap + 1;
2510 pVCpu->iem.s.cActiveMappings++;
2511
2512 *ppvMem = pbBuf;
2513 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
2514 return VINF_SUCCESS;
2515}
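/*
 * The bUnmapInfo byte produced by both bounce buffer mappers packs what the
 * unmap side needs: bits 0-2 hold iMemMap, bit 3 (0x08) marks the value as
 * valid, and the IEM_ACCESS_TYPE_MASK bits of fAccess are stored from bit 4
 * up.  So, for instance, a write-only bounce mapping in slot 1 comes back as
 * 1 | 0x08 | (IEM_ACCESS_TYPE_WRITE << 4), which iemMemCommitAndUnmap below
 * decodes again with "bUnmapInfo & 0x7", "bUnmapInfo & 0x08" and
 * "bUnmapInfo >> 4".
 */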
2516
2517
2518
2519/**
2520 * Commits the guest memory if bounce buffered and unmaps it.
2521 *
2522 * @returns Strict VBox status code.
2523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2524 * @param bUnmapInfo Unmap info set by iemMemMap.
2525 */
2526VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
2527{
2528 uintptr_t const iMemMap = bUnmapInfo & 0x7;
2529 AssertMsgReturn( (bUnmapInfo & 0x08)
2530 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
2531 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
2532 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
2533 VERR_NOT_FOUND);
2534
2535 /* If it's bounce buffered, we may need to write back the buffer. */
2536 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
2537 {
2538 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
2539 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
2540 }
2541 /* Otherwise unlock it. */
2542 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
2543 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
2544
2545 /* Free the entry. */
2546 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2547 Assert(pVCpu->iem.s.cActiveMappings != 0);
2548 pVCpu->iem.s.cActiveMappings--;
2549 return VINF_SUCCESS;
2550}
2551
2552
2553/**
2554 * Rolls back the guest memory (conceptually only) and unmaps it.
2555 *
2556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2557 * @param bUnmapInfo Unmap info set by iemMemMap.
2558 */
2559void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
2560{
2561 uintptr_t const iMemMap = bUnmapInfo & 0x7;
2562 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
2563 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
2564 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
2565 == ((unsigned)bUnmapInfo >> 4),
2566 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
2567
2568 /* Unlock it if necessary. */
2569 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
2570 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
2571
2572 /* Free the entry. */
2573 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2574 Assert(pVCpu->iem.s.cActiveMappings != 0);
2575 pVCpu->iem.s.cActiveMappings--;
2576}
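/*
 * Sketch of the commit vs. rollback choice (pvMem and bUnmapInfo come from a
 * prior, successful iemMemMap call, which lives elsewhere in IEM):
 */
#if 0 /* illustration only */
    *(uint32_t *)pvMem += 1;                                /* the actual work on the mapped guest bytes */
    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);     /* commit bounce buffers, release page locks */
    /* ...or, if the instruction has to be abandoned instead of retired: */
    iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);              /* drop the mapping without writing back     */
#endif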
2577
2578#ifdef IEM_WITH_SETJMP
2579
2580/**
2581 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
2582 *
2583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2584 * @param bUnmapInfo Unmap info set by iemMemMap.
2586 */
2587void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
2588{
2589 uintptr_t const iMemMap = bUnmapInfo & 0x7;
2590 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
2591 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
2592 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
2593 == ((unsigned)bUnmapInfo >> 4),
2594 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
2595
2596 /* If it's bounce buffered, we may need to write back the buffer. */
2597 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
2598 {
2599 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
2600 {
2601 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
2602 if (rcStrict == VINF_SUCCESS)
2603 return;
2604 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
2605 }
2606 }
2607 /* Otherwise unlock it. */
2608 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
2609 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
2610
2611 /* Free the entry. */
2612 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2613 Assert(pVCpu->iem.s.cActiveMappings != 0);
2614 pVCpu->iem.s.cActiveMappings--;
2615}
2616
2617
2618/** Fallback for iemMemCommitAndUnmapRwJmp. */
2619void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
2620{
2621 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
2622 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
2623}
2624
2625
2626/** Fallback for iemMemCommitAndUnmapAtJmp. */
2627void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
2628{
2629 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
2630 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
2631}
2632
2633
2634/** Fallback for iemMemCommitAndUnmapWoJmp. */
2635void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
2636{
2637 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
2638 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
2639}
2640
2641
2642/** Fallback for iemMemCommitAndUnmapRoJmp. */
2643void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
2644{
2645 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
2646 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
2647}
2648
2649
2650/** Fallback for iemMemRollbackAndUnmapWo. */
2651void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
2652{
2653 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
2654 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
2655}
2656
2657#endif /* IEM_WITH_SETJMP */
2658
2659#ifndef IN_RING3
2660/**
2661 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
2662 * buffer part runs into trouble, the write is postponed to ring-3 (VMCPU_FF_IEM is set).
2663 *
2664 * Allows the instruction to be completed and retired, while the IEM user will
2665 * return to ring-3 immediately afterwards and do the postponed writes there.
2666 *
2667 * @returns VBox status code (no strict statuses). Caller must check
2668 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
2669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2670 * @param bUnmapInfo Unmap info set by iemMemMap.
2672 */
2673VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
2674{
2675 uintptr_t const iMemMap = bUnmapInfo & 0x7;
2676 AssertMsgReturn( (bUnmapInfo & 0x08)
2677 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
2678 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
2679 == ((unsigned)bUnmapInfo >> 4),
2680 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
2681 VERR_NOT_FOUND);
2682
2683 /* If it's bounce buffered, we may need to write back the buffer. */
2684 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
2685 {
2686 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
2687 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
2688 }
2689 /* Otherwise unlock it. */
2690 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
2691 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
2692
2693 /* Free the entry. */
2694 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2695 Assert(pVCpu->iem.s.cActiveMappings != 0);
2696 pVCpu->iem.s.cActiveMappings--;
2697 return VINF_SUCCESS;
2698}
2699#endif
2700
2701
2702/**
2703 * Rolls back mappings, releasing page locks and such.
2704 *
2705 * The caller shall only call this after checking cActiveMappings.
2706 *
2707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2708 */
2709void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
2710{
2711 Assert(pVCpu->iem.s.cActiveMappings > 0);
2712
2713 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
2714 while (iMemMap-- > 0)
2715 {
2716 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
2717 if (fAccess != IEM_ACCESS_INVALID)
2718 {
2719 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
2720 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
2721 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
2722 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
2723 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
2724 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
2725 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
2726 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
2727 pVCpu->iem.s.cActiveMappings--;
2728 }
2729 }
2730}
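/*
 * Rollback is purely local: page mapping locks are released and the entries
 * are marked IEM_ACCESS_INVALID, but bounce buffered data that never went
 * through iemMemBounceBufferCommitAndUnmap is simply dropped, so no guest
 * visible write happens for mappings abandoned here.
 */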
2731
2732#undef LOG_GROUP
2733#define LOG_GROUP LOG_GROUP_IEM
2734
2735/** @} */
2736
2737/** @name Opcode Helpers.
2738 * @{
2739 */
2740
2741/**
2742 * Calculates the effective address of a ModR/M memory operand.
2743 *
2744 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
2745 *
2746 * @return Strict VBox status code.
2747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2748 * @param bRm The ModRM byte.
2749 * @param cbImmAndRspOffset - First byte: The size of any immediate
2750 * following the effective address opcode bytes
2751 * (only for RIP relative addressing).
2752 * - Second byte: RSP displacement (for POP [ESP]).
2753 * @param pGCPtrEff Where to return the effective address.
2754 */
2755VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
2756{
2757 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
2758# define SET_SS_DEF() \
2759 do \
2760 { \
2761 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
2762 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
2763 } while (0)
2764
2765 if (!IEM_IS_64BIT_CODE(pVCpu))
2766 {
2767/** @todo Check the effective address size crap! */
2768 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
2769 {
2770 uint16_t u16EffAddr;
2771
2772 /* Handle the disp16 form with no registers first. */
2773 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
2774 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
2775 else
2776 {
2777 /* Get the displacement. */
2778 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2779 {
2780 case 0: u16EffAddr = 0; break;
2781 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
2782 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
2783 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
2784 }
2785
2786 /* Add the base and index registers to the disp. */
2787 switch (bRm & X86_MODRM_RM_MASK)
2788 {
2789 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
2790 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
2791 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
2792 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
2793 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
2794 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
2795 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
2796 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
2797 }
2798 }
2799
2800 *pGCPtrEff = u16EffAddr;
2801 }
2802 else
2803 {
2804 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
2805 uint32_t u32EffAddr;
2806
2807 /* Handle the disp32 form with no registers first. */
2808 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
2809 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
2810 else
2811 {
2812 /* Get the register (or SIB) value. */
2813 switch ((bRm & X86_MODRM_RM_MASK))
2814 {
2815 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
2816 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
2817 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
2818 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
2819 case 4: /* SIB */
2820 {
2821 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
2822
2823 /* Get the index and scale it. */
2824 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
2825 {
2826 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
2827 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
2828 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
2829 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
2830 case 4: u32EffAddr = 0; /*none */ break;
2831 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
2832 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
2833 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
2834 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2835 }
2836 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2837
2838 /* add base */
2839 switch (bSib & X86_SIB_BASE_MASK)
2840 {
2841 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
2842 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
2843 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
2844 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
2845 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
2846 case 5:
2847 if ((bRm & X86_MODRM_MOD_MASK) != 0)
2848 {
2849 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
2850 SET_SS_DEF();
2851 }
2852 else
2853 {
2854 uint32_t u32Disp;
2855 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
2856 u32EffAddr += u32Disp;
2857 }
2858 break;
2859 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
2860 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
2861 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2862 }
2863 break;
2864 }
2865 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
2866 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
2867 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
2868 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2869 }
2870
2871 /* Get and add the displacement. */
2872 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2873 {
2874 case 0:
2875 break;
2876 case 1:
2877 {
2878 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
2879 u32EffAddr += i8Disp;
2880 break;
2881 }
2882 case 2:
2883 {
2884 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
2885 u32EffAddr += u32Disp;
2886 break;
2887 }
2888 default:
2889 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
2890 }
2891
2892 }
2893 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
2894 *pGCPtrEff = u32EffAddr;
2895 }
2896 }
2897 else
2898 {
2899 uint64_t u64EffAddr;
2900
2901 /* Handle the rip+disp32 form with no registers first. */
2902 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
2903 {
2904 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
2905 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
2906 }
2907 else
2908 {
2909 /* Get the register (or SIB) value. */
2910 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
2911 {
2912 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
2913 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
2914 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
2915 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
2916 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
2917 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
2918 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
2919 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
2920 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
2921 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
2922 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
2923 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
2924 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
2925 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
2926 /* SIB */
2927 case 4:
2928 case 12:
2929 {
2930 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
2931
2932 /* Get the index and scale it. */
2933 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
2934 {
2935 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
2936 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
2937 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
2938 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
2939 case 4: u64EffAddr = 0; /*none */ break;
2940 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
2941 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
2942 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
2943 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
2944 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
2945 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
2946 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
2947 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
2948 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
2949 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
2950 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
2951 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2952 }
2953 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2954
2955 /* add base */
2956 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
2957 {
2958 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
2959 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
2960 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
2961 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
2962 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
2963 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
2964 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
2965 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
2966 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
2967 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
2968 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
2969 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
2970 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
2971 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
2972 /* complicated encodings */
2973 case 5:
2974 case 13:
2975 if ((bRm & X86_MODRM_MOD_MASK) != 0)
2976 {
2977 if (!pVCpu->iem.s.uRexB)
2978 {
2979 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
2980 SET_SS_DEF();
2981 }
2982 else
2983 u64EffAddr += pVCpu->cpum.GstCtx.r13;
2984 }
2985 else
2986 {
2987 uint32_t u32Disp;
2988 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
2989 u64EffAddr += (int32_t)u32Disp;
2990 }
2991 break;
2992 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2993 }
2994 break;
2995 }
2996 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2997 }
2998
2999 /* Get and add the displacement. */
3000 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
3001 {
3002 case 0:
3003 break;
3004 case 1:
3005 {
3006 int8_t i8Disp;
3007 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
3008 u64EffAddr += i8Disp;
3009 break;
3010 }
3011 case 2:
3012 {
3013 uint32_t u32Disp;
3014 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
3015 u64EffAddr += (int32_t)u32Disp;
3016 break;
3017 }
3018 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
3019 }
3020
3021 }
3022
3023 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
3024 *pGCPtrEff = u64EffAddr;
3025 else
3026 {
3027 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
3028 *pGCPtrEff = u64EffAddr & UINT32_MAX;
3029 }
3030 }
3031
3032 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
3033 return VINF_SUCCESS;
3034}
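/*
 * Worked 32-bit example for the decoder above (the bytes are made up for
 * illustration): for "mov eax, [ebx+esi*4+0x10]" the ModR/M byte is 0x44
 * (mod=01 disp8, reg=000, rm=100 so a SIB byte follows) and the SIB byte is
 * 0xB3 (scale=10b giving index*4, index=110b ESI, base=011b EBX), so the code
 * above returns *pGCPtrEff = ebx + (esi << 2) + 0x10 and leaves the default
 * segment alone since neither EBP nor ESP based addressing is involved.
 */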
3035
3036
3037#ifdef IEM_WITH_SETJMP
3038/**
3039 * Calculates the effective address of a ModR/M memory operand.
3040 *
3041 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
3042 *
3043 * May longjmp on internal error.
3044 *
3045 * @return The effective address.
3046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3047 * @param bRm The ModRM byte.
3048 * @param cbImmAndRspOffset - First byte: The size of any immediate
3049 * following the effective address opcode bytes
3050 * (only for RIP relative addressing).
3051 * - Second byte: RSP displacement (for POP [ESP]).
3052 */
3053RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
3054{
3055 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
3056# define SET_SS_DEF() \
3057 do \
3058 { \
3059 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
3060 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
3061 } while (0)
3062
3063 if (!IEM_IS_64BIT_CODE(pVCpu))
3064 {
3065/** @todo Check the effective address size crap! */
3066 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
3067 {
3068 uint16_t u16EffAddr;
3069
3070 /* Handle the disp16 form with no registers first. */
3071 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
3072 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
3073 else
3074 {
3075 /* Get the displacement. */
3076 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
3077 {
3078 case 0: u16EffAddr = 0; break;
3079 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
3080 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
3081 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
3082 }
3083
3084 /* Add the base and index registers to the disp. */
3085 switch (bRm & X86_MODRM_RM_MASK)
3086 {
3087 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
3088 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
3089 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
3090 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
3091 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
3092 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
3093 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
3094 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
3095 }
3096 }
3097
3098 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
3099 return u16EffAddr;
3100 }
3101
3102 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
3103 uint32_t u32EffAddr;
3104
3105 /* Handle the disp32 form with no registers first. */
3106 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
3107 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
3108 else
3109 {
3110 /* Get the register (or SIB) value. */
3111 switch ((bRm & X86_MODRM_RM_MASK))
3112 {
3113 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
3114 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
3115 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
3116 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
3117 case 4: /* SIB */
3118 {
3119 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
3120
3121 /* Get the index and scale it. */
3122 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
3123 {
3124 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
3125 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
3126 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
3127 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
3128 case 4: u32EffAddr = 0; /*none */ break;
3129 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
3130 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
3131 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
3132 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
3133 }
3134 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
3135
3136 /* add base */
3137 switch (bSib & X86_SIB_BASE_MASK)
3138 {
3139 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
3140 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
3141 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
3142 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
3143 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
3144 case 5:
3145 if ((bRm & X86_MODRM_MOD_MASK) != 0)
3146 {
3147 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
3148 SET_SS_DEF();
3149 }
3150 else
3151 {
3152 uint32_t u32Disp;
3153 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
3154 u32EffAddr += u32Disp;
3155 }
3156 break;
3157 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
3158 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
3159 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
3160 }
3161 break;
3162 }
3163 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
3164 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
3165 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
3166 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
3167 }
3168
3169 /* Get and add the displacement. */
3170 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
3171 {
3172 case 0:
3173 break;
3174 case 1:
3175 {
3176 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
3177 u32EffAddr += i8Disp;
3178 break;
3179 }
3180 case 2:
3181 {
3182 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
3183 u32EffAddr += u32Disp;
3184 break;
3185 }
3186 default:
3187 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
3188 }
3189 }
3190
3191 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
3192 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
3193 return u32EffAddr;
3194 }
3195
3196 uint64_t u64EffAddr;
3197
3198 /* Handle the rip+disp32 form with no registers first. */
3199 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
3200 {
3201 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
3202 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
3203 }
3204 else
3205 {
3206 /* Get the register (or SIB) value. */
3207 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
3208 {
3209 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
3210 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
3211 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
3212 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
3213 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
3214 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
3215 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
3216 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
3217 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
3218 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
3219 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
3220 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
3221 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
3222 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
3223 /* SIB */
3224 case 4:
3225 case 12:
3226 {
3227 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
3228
3229 /* Get the index and scale it. */
3230 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
3231 {
3232 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
3233 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
3234 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
3235 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
3236 case 4: u64EffAddr = 0; /*none */ break;
3237 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
3238 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
3239 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
3240 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
3241 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
3242 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
3243 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
3244 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
3245 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
3246 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
3247 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
3248 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
3249 }
3250 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
3251
3252 /* add base */
3253 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
3254 {
3255 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
3256 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
3257 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
3258 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
3259 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
3260 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
3261 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
3262 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
3263 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
3264 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
3265 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
3266 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
3267 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
3268 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
3269 /* complicated encodings */
3270 case 5:
3271 case 13:
3272 if ((bRm & X86_MODRM_MOD_MASK) != 0)
3273 {
3274 if (!pVCpu->iem.s.uRexB)
3275 {
3276 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
3277 SET_SS_DEF();
3278 }
3279 else
3280 u64EffAddr += pVCpu->cpum.GstCtx.r13;
3281 }
3282 else
3283 {
3284 uint32_t u32Disp;
3285 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
3286 u64EffAddr += (int32_t)u32Disp;
3287 }
3288 break;
3289 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
3290 }
3291 break;
3292 }
3293 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
3294 }
3295
3296 /* Get and add the displacement. */
3297 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
3298 {
3299 case 0:
3300 break;
3301 case 1:
3302 {
3303 int8_t i8Disp;
3304 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
3305 u64EffAddr += i8Disp;
3306 break;
3307 }
3308 case 2:
3309 {
3310 uint32_t u32Disp;
3311 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
3312 u64EffAddr += (int32_t)u32Disp;
3313 break;
3314 }
3315 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
3316 }
3317
3318 }
3319
3320 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
3321 {
3322 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
3323 return u64EffAddr;
3324 }
3325 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
3326 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
3327 return u64EffAddr & UINT32_MAX;
3328}
3329#endif /* IEM_WITH_SETJMP */
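/*
 * Illustrative sketch (not part of the surrounding IEM code): the ModR/M and
 * SIB bit layout that the effective-address helpers above decode, written in
 * plain C with made-up names.  Field positions follow the x86 encoding:
 * mod=bits 7:6, reg=5:3, r/m=2:0 for ModR/M; scale=7:6, index=5:3, base=2:0
 * for SIB.  The effective address is base + (index << scale) + displacement.
 */
#if 0 /* example only */
# include <stdint.h>

typedef struct EXAMPLEMODRM
{
    uint8_t uMod;   /* 0/1/2: memory forms (no disp / disp8 / disp16-32), 3: register form. */
    uint8_t uReg;   /* Register or opcode extension field. */
    uint8_t uRm;    /* Base register, or escape to the SIB / disp-only forms. */
} EXAMPLEMODRM;

static EXAMPLEMODRM exampleDecodeModRm(uint8_t bRm)
{
    EXAMPLEMODRM Fields;
    Fields.uMod = (uint8_t)((bRm >> 6) & 3);
    Fields.uReg = (uint8_t)((bRm >> 3) & 7);
    Fields.uRm  = (uint8_t)( bRm       & 7);
    return Fields;
}

static uint64_t exampleCalcSibEffAddr(uint8_t bSib, uint64_t uBase, uint64_t uIndex, int64_t iDisp)
{
    uint8_t const cShift = (uint8_t)((bSib >> 6) & 3);  /* scale field = shift count 0..3 */
    return uBase + (uIndex << cShift) + (uint64_t)iDisp;
}
#endif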
3330
3331
3332/**
3333 * Calculates the effective address of a ModR/M memory operand, extended version
3334 * for use in the recompilers.
3335 *
3336 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
3337 *
3338 * @return Strict VBox status code.
3339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3340 * @param bRm The ModRM byte.
3341 * @param cbImmAndRspOffset - First byte: The size of any immediate
3342 * following the effective address opcode bytes
3343 * (only for RIP relative addressing).
3344 * - Second byte: RSP displacement (for POP [ESP]).
3345 * @param pGCPtrEff Where to return the effective address.
3346 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
3347 * SIB byte (bits 39:32).
3348 */
3349VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
3350{
3351 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
3352# define SET_SS_DEF() \
3353 do \
3354 { \
3355 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
3356 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
3357 } while (0)
3358
3359 uint64_t uInfo;
3360 if (!IEM_IS_64BIT_CODE(pVCpu))
3361 {
3362/** @todo Check the effective address size crap! */
3363 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
3364 {
3365 uint16_t u16EffAddr;
3366
3367 /* Handle the disp16 form with no registers first. */
3368 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
3369 {
3370 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
3371 uInfo = u16EffAddr;
3372 }
3373 else
3374 {
3375 /* Get the displacement. */
3376 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
3377 {
3378 case 0: u16EffAddr = 0; break;
3379 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
3380 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
3381 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
3382 }
3383 uInfo = u16EffAddr;
3384
3385 /* Add the base and index registers to the disp. */
3386 switch (bRm & X86_MODRM_RM_MASK)
3387 {
3388 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
3389 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
3390 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
3391 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
3392 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
3393 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
3394 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
3395 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
3396 }
3397 }
3398
3399 *pGCPtrEff = u16EffAddr;
3400 }
3401 else
3402 {
3403 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
3404 uint32_t u32EffAddr;
3405
3406 /* Handle the disp32 form with no registers first. */
3407 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
3408 {
3409 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
3410 uInfo = u32EffAddr;
3411 }
3412 else
3413 {
3414 /* Get the register (or SIB) value. */
3415 uInfo = 0;
3416 switch ((bRm & X86_MODRM_RM_MASK))
3417 {
3418 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
3419 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
3420 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
3421 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
3422 case 4: /* SIB */
3423 {
3424 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
3425 uInfo = (uint64_t)bSib << 32;
3426
3427 /* Get the index and scale it. */
3428 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
3429 {
3430 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
3431 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
3432 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
3433 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
3434 case 4: u32EffAddr = 0; /*none */ break;
3435 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
3436 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
3437 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
3438 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3439 }
3440 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
3441
3442 /* add base */
3443 switch (bSib & X86_SIB_BASE_MASK)
3444 {
3445 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
3446 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
3447 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
3448 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
3449 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
3450 case 5:
3451 if ((bRm & X86_MODRM_MOD_MASK) != 0)
3452 {
3453 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
3454 SET_SS_DEF();
3455 }
3456 else
3457 {
3458 uint32_t u32Disp;
3459 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
3460 u32EffAddr += u32Disp;
3461 uInfo |= u32Disp;
3462 }
3463 break;
3464 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
3465 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
3466 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3467 }
3468 break;
3469 }
3470 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
3471 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
3472 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
3473 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3474 }
3475
3476 /* Get and add the displacement. */
3477 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
3478 {
3479 case 0:
3480 break;
3481 case 1:
3482 {
3483 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
3484 u32EffAddr += i8Disp;
3485 uInfo |= (uint32_t)(int32_t)i8Disp;
3486 break;
3487 }
3488 case 2:
3489 {
3490 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
3491 u32EffAddr += u32Disp;
3492 uInfo |= (uint32_t)u32Disp;
3493 break;
3494 }
3495 default:
3496 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
3497 }
3498
3499 }
3500 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
3501 *pGCPtrEff = u32EffAddr;
3502 }
3503 }
3504 else
3505 {
3506 uint64_t u64EffAddr;
3507
3508 /* Handle the rip+disp32 form with no registers first. */
3509 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
3510 {
3511 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
3512 uInfo = (uint32_t)u64EffAddr;
3513 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
3514 }
3515 else
3516 {
3517 /* Get the register (or SIB) value. */
3518 uInfo = 0;
3519 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
3520 {
3521 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
3522 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
3523 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
3524 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
3525 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
3526 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
3527 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
3528 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
3529 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
3530 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
3531 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
3532 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
3533 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
3534 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
3535 /* SIB */
3536 case 4:
3537 case 12:
3538 {
3539 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
3540 uInfo = (uint64_t)bSib << 32;
3541
3542 /* Get the index and scale it. */
3543 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
3544 {
3545 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
3546 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
3547 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
3548 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
3549 case 4: u64EffAddr = 0; /*none */ break;
3550 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
3551 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
3552 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
3553 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
3554 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
3555 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
3556 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
3557 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
3558 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
3559 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
3560 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
3561 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3562 }
3563 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
3564
3565 /* add base */
3566 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
3567 {
3568 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
3569 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
3570 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
3571 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
3572 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
3573 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
3574 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
3575 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
3576 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
3577 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
3578 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
3579 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
3580 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
3581 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
3582 /* complicated encodings */
3583 case 5:
3584 case 13:
3585 if ((bRm & X86_MODRM_MOD_MASK) != 0)
3586 {
3587 if (!pVCpu->iem.s.uRexB)
3588 {
3589 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
3590 SET_SS_DEF();
3591 }
3592 else
3593 u64EffAddr += pVCpu->cpum.GstCtx.r13;
3594 }
3595 else
3596 {
3597 uint32_t u32Disp;
3598 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
3599 u64EffAddr += (int32_t)u32Disp;
3600 uInfo |= u32Disp;
3601 }
3602 break;
3603 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3604 }
3605 break;
3606 }
3607 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3608 }
3609
3610 /* Get and add the displacement. */
3611 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
3612 {
3613 case 0:
3614 break;
3615 case 1:
3616 {
3617 int8_t i8Disp;
3618 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
3619 u64EffAddr += i8Disp;
3620 uInfo |= (uint32_t)(int32_t)i8Disp;
3621 break;
3622 }
3623 case 2:
3624 {
3625 uint32_t u32Disp;
3626 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
3627 u64EffAddr += (int32_t)u32Disp;
3628 uInfo |= u32Disp;
3629 break;
3630 }
3631 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
3632 }
3633
3634 }
3635
3636 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
3637 *pGCPtrEff = u64EffAddr;
3638 else
3639 {
3640 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
3641 *pGCPtrEff = u64EffAddr & UINT32_MAX;
3642 }
3643 }
3644 *puInfo = uInfo;
3645
3646 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
3647 return VINF_SUCCESS;
3648}
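/*
 * Minimal usage sketch for the extended helper above (example only, not
 * called anywhere): it shows how a recompiler-side caller could take apart
 * the value returned via puInfo, i.e. the 32-bit displacement in bits 31:0
 * and the SIB byte in bits 39:32 as documented in the function header.
 */
#if 0 /* example only */
    RTGCPTR  GCPtrEff;
    uint64_t uInfo;
    VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0 /*cbImmAndRspOffset*/, &GCPtrEff, &uInfo);
    if (rcStrict == VINF_SUCCESS)
    {
        uint32_t const u32Disp = (uint32_t)uInfo;          /* bits 31:0  */
        uint8_t  const bSib    = (uint8_t)(uInfo >> 32);   /* bits 39:32 */
        Log5(("EffAddr=%RGv u32Disp=%#x bSib=%#x\n", GCPtrEff, u32Disp, bSib));
    }
#endif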
3649
3650/** @} */
3651
3652
3653#ifdef LOG_ENABLED
3654/**
3655 * Logs the current instruction.
3656 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3657 * @param fSameCtx Set if we have the same context information as the VMM,
3658 * clear if we may have already executed an instruction in
3659 * our debug context. When clear, we assume IEMCPU holds
3660 * valid CPU mode info.
3661 *
3662 * The @a fSameCtx parameter is now misleading and obsolete.
3663 * @param pszFunction The IEM function doing the execution.
3664 */
3665static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
3666{
3667# ifdef IN_RING3
3668 if (LogIs2Enabled())
3669 {
3670 char szInstr[256];
3671 uint32_t cbInstr = 0;
3672 if (fSameCtx)
3673 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
3674 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3675 szInstr, sizeof(szInstr), &cbInstr);
3676 else
3677 {
3678 uint32_t fFlags = 0;
3679 switch (IEM_GET_CPU_MODE(pVCpu))
3680 {
3681 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
3682 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
3683 case IEMMODE_16BIT:
3684 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
3685 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
3686 else
3687 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
3688 break;
3689 }
3690 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
3691 szInstr, sizeof(szInstr), &cbInstr);
3692 }
3693
3694 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
3695 Log2(("**** %s fExec=%x\n"
3696 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
3697 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
3698 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
3699 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
3700 " %s\n"
3701 , pszFunction, pVCpu->iem.s.fExec,
3702 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
3703 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
3704 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
3705 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
3706 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
3707 szInstr));
3708
3709 /* This stuff sucks atm. as it fills the log with MSRs. */
3710 //if (LogIs3Enabled())
3711 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
3712 }
3713 else
3714# endif
3715 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
3716 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
3717 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
3718}
3719#endif /* LOG_ENABLED */
3720
3721
3722#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3723/**
3724 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
3725 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
3726 *
3727 * @returns Modified rcStrict.
3728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3729 * @param rcStrict The instruction execution status.
3730 */
3731static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
3732{
3733 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
3734 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
3735 {
3736 /* VMX preemption timer takes priority over NMI-window exits. */
3737 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3738 {
3739 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
3740 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
3741 }
3742 /*
3743 * Check remaining intercepts.
3744 *
3745 * NMI-window and Interrupt-window VM-exits.
3746 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
3747 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
3748 *
3749 * See Intel spec. 26.7.6 "NMI-Window Exiting".
3750 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
3751 */
3752 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
3753 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
3754 && !TRPMHasTrap(pVCpu))
3755 {
3756 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
3757 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
3758 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
3759 {
3760 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
3761 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
3762 }
3763 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
3764 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
3765 {
3766 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
3767 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
3768 }
3769 }
3770 }
3771 /* TPR-below threshold/APIC write has the highest priority. */
3772 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3773 {
3774 rcStrict = iemVmxApicWriteEmulation(pVCpu);
3775 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
3776 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
3777 }
3778 /* MTF takes priority over VMX-preemption timer. */
3779 else
3780 {
3781 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
3782 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
3783 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
3784 }
3785 return rcStrict;
3786}
3787#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3788
3789
3790/**
3791 * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,
3792 * IEMExecOneBypass and friends.
3793 *
3794 * Similar code is found in IEMExecLots.
3795 *
3796 * @return Strict VBox status code.
3797 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3798 * @param fExecuteInhibit If set, execute the instruction following CLI,
3799 * POP SS and MOV SS,GR.
3800 * @param pszFunction The calling function name.
3801 */
3802DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
3803{
3804 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
3805 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
3806 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
3807 RT_NOREF_PV(pszFunction);
3808
3809#ifdef IEM_WITH_SETJMP
3810 VBOXSTRICTRC rcStrict;
3811 IEM_TRY_SETJMP(pVCpu, rcStrict)
3812 {
3813 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
3814 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
3815 }
3816 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
3817 {
3818 pVCpu->iem.s.cLongJumps++;
3819 }
3820 IEM_CATCH_LONGJMP_END(pVCpu);
3821#else
3822 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
3823 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
3824#endif
3825 if (rcStrict == VINF_SUCCESS)
3826 pVCpu->iem.s.cInstructions++;
3827 if (pVCpu->iem.s.cActiveMappings > 0)
3828 {
3829 Assert(rcStrict != VINF_SUCCESS);
3830 iemMemRollback(pVCpu);
3831 }
3832 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
3833 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
3834 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
3835
3836//#ifdef DEBUG
3837// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
3838//#endif
3839
3840#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3841 /*
3842 * Perform any VMX nested-guest instruction boundary actions.
3843 *
3844 * If any of these causes a VM-exit, we must skip executing the next
3845 * instruction (would run into stale page tables). A VM-exit makes sure
3846 * there is no interrupt-inhibition, so that should ensure we don't go
3847 * to try execute the next instruction. Clearing fExecuteInhibit is
3848 * problematic because of the setjmp/longjmp clobbering above.
3849 */
3850 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
3851 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
3852 || rcStrict != VINF_SUCCESS)
3853 { /* likely */ }
3854 else
3855 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
3856#endif
3857
3858 /* Execute the next instruction as well if a cli, pop ss or
3859 mov ss, Gr has just completed successfully. */
3860 if ( fExecuteInhibit
3861 && rcStrict == VINF_SUCCESS
3862 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
3863 {
3864 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
3865 if (rcStrict == VINF_SUCCESS)
3866 {
3867#ifdef LOG_ENABLED
3868 iemLogCurInstr(pVCpu, false, pszFunction);
3869#endif
3870#ifdef IEM_WITH_SETJMP
3871 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
3872 {
3873 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
3874 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
3875 }
3876 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
3877 {
3878 pVCpu->iem.s.cLongJumps++;
3879 }
3880 IEM_CATCH_LONGJMP_END(pVCpu);
3881#else
3882 IEM_OPCODE_GET_FIRST_U8(&b);
3883 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
3884#endif
3885 if (rcStrict == VINF_SUCCESS)
3886 {
3887 pVCpu->iem.s.cInstructions++;
3888#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3889 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
3890 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
3891 { /* likely */ }
3892 else
3893 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
3894#endif
3895 }
3896 if (pVCpu->iem.s.cActiveMappings > 0)
3897 {
3898 Assert(rcStrict != VINF_SUCCESS);
3899 iemMemRollback(pVCpu);
3900 }
3901 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
3902 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
3903 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
3904 }
3905 else if (pVCpu->iem.s.cActiveMappings > 0)
3906 iemMemRollback(pVCpu);
3907 /** @todo drop this after we bake this change into RIP advancing. */
3908 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
3909 }
3910
3911 /*
3912 * Return value fiddling, statistics and sanity assertions.
3913 */
3914 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
3915
3916 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3917 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3918 return rcStrict;
3919}
3920
3921
3922/**
3923 * Execute one instruction.
3924 *
3925 * @return Strict VBox status code.
3926 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3927 */
3928VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
3929{
3930 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
3931#ifdef LOG_ENABLED
3932 iemLogCurInstr(pVCpu, true, "IEMExecOne");
3933#endif
3934
3935 /*
3936 * Do the decoding and emulation.
3937 */
3938 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
3939 if (rcStrict == VINF_SUCCESS)
3940 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
3941 else if (pVCpu->iem.s.cActiveMappings > 0)
3942 iemMemRollback(pVCpu);
3943
3944 if (rcStrict != VINF_SUCCESS)
3945 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
3946 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
3947 return rcStrict;
3948}
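/*
 * Usage sketch (example only): a caller such as EM typically invokes
 * IEMExecOne for a single instruction and acts on any non-VINF_SUCCESS
 * status; the small retry loop below is an illustrative assumption, not
 * actual EM code.
 */
#if 0 /* example only */
    for (unsigned iTry = 0; iTry < 8; iTry++)
    {
        VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;    /* let the caller reschedule or handle it */
    }
#endif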
3949
3950
3951VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
3952 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
3953{
3954 VBOXSTRICTRC rcStrict;
3955 if ( cbOpcodeBytes
3956 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
3957 {
3958 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
3959#ifdef IEM_WITH_CODE_TLB
3960 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
3961 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
3962 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
3963 pVCpu->iem.s.offCurInstrStart = 0;
3964 pVCpu->iem.s.offInstrNextByte = 0;
3965 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
3966#else
3967 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
3968 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
3969#endif
3970 rcStrict = VINF_SUCCESS;
3971 }
3972 else
3973 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
3974 if (rcStrict == VINF_SUCCESS)
3975 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
3976 else if (pVCpu->iem.s.cActiveMappings > 0)
3977 iemMemRollback(pVCpu);
3978
3979 return rcStrict;
3980}
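/*
 * Usage sketch (example only): feeding IEM opcode bytes the caller already
 * fetched at the current RIP so the prefetch can be skipped.  abOpcode and
 * cbOpcode are assumed caller locals, not names from this file.
 */
#if 0 /* example only */
    VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
                                                         abOpcode, cbOpcode);
#endif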
3981
3982
3983VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)
3984{
3985 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
3986 if (rcStrict == VINF_SUCCESS)
3987 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");
3988 else if (pVCpu->iem.s.cActiveMappings > 0)
3989 iemMemRollback(pVCpu);
3990
3991 return rcStrict;
3992}
3993
3994
3995VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
3996 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
3997{
3998 VBOXSTRICTRC rcStrict;
3999 if ( cbOpcodeBytes
4000 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
4001 {
4002 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
4003#ifdef IEM_WITH_CODE_TLB
4004 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
4005 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
4006 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
4007 pVCpu->iem.s.offCurInstrStart = 0;
4008 pVCpu->iem.s.offInstrNextByte = 0;
4009 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
4010#else
4011 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
4012 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
4013#endif
4014 rcStrict = VINF_SUCCESS;
4015 }
4016 else
4017 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
4018 if (rcStrict == VINF_SUCCESS)
4019 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
4020 else if (pVCpu->iem.s.cActiveMappings > 0)
4021 iemMemRollback(pVCpu);
4022
4023 return rcStrict;
4024}
4025
4026
4027/**
4028 * For handling split cacheline lock operations when the host has split-lock
4029 * detection enabled.
4030 *
4031 * This will cause the interpreter to disregard the lock prefix and implicit
4032 * locking (xchg).
4033 *
4034 * @returns Strict VBox status code.
4035 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4036 */
4037VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
4038{
4039 /*
4040 * Do the decoding and emulation.
4041 */
4042 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
4043 if (rcStrict == VINF_SUCCESS)
4044 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
4045 else if (pVCpu->iem.s.cActiveMappings > 0)
4046 iemMemRollback(pVCpu);
4047
4048 if (rcStrict != VINF_SUCCESS)
4049 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
4050 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
4051 return rcStrict;
4052}
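/*
 * Usage sketch (example only): when the host's split-lock detection trips on
 * a guest locked access that straddles a cache line, the VMM can fall back to
 * interpreting that one instruction with the LOCK semantics disregarded.
 */
#if 0 /* example only */
    VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
#endif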
4053
4054
4055/**
4056 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
4057 * inject a pending TRPM trap.
4058 */
4059VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
4060{
4061 Assert(TRPMHasTrap(pVCpu));
4062
4063 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
4064 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4065 {
4066 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
4067#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
4068 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
4069 if (fIntrEnabled)
4070 {
4071 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
4072 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
4073 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4074 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
4075 else
4076 {
4077 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
4078 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
4079 }
4080 }
4081#else
4082 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
4083#endif
4084 if (fIntrEnabled)
4085 {
4086 uint8_t u8TrapNo;
4087 TRPMEVENT enmType;
4088 uint32_t uErrCode;
4089 RTGCPTR uCr2;
4090 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
4091 AssertRC(rc2);
4092 Assert(enmType == TRPM_HARDWARE_INT);
4093 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
4094
4095 TRPMResetTrap(pVCpu);
4096
4097#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
4098 /* Injecting an event may cause a VM-exit. */
4099 if ( rcStrict != VINF_SUCCESS
4100 && rcStrict != VINF_IEM_RAISED_XCPT)
4101 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
4102#else
4103 NOREF(rcStrict);
4104#endif
4105 }
4106 }
4107
4108 return VINF_SUCCESS;
4109}
4110
4111
4112VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
4113{
4114 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
4115 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
4116 Assert(cMaxInstructions > 0);
4117
4118 /*
4119 * See if there is an interrupt pending in TRPM, inject it if we can.
4120 */
4121 /** @todo What if we are injecting an exception and not an interrupt? Is that
4122 * possible here? For now we assert it is indeed only an interrupt. */
4123 if (!TRPMHasTrap(pVCpu))
4124 { /* likely */ }
4125 else
4126 {
4127 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
4128 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4129 { /*likely */ }
4130 else
4131 return rcStrict;
4132 }
4133
4134 /*
4135 * Initial decoder init w/ prefetch, then setup setjmp.
4136 */
4137 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
4138 if (rcStrict == VINF_SUCCESS)
4139 {
4140#ifdef IEM_WITH_SETJMP
4141 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
4142 IEM_TRY_SETJMP(pVCpu, rcStrict)
4143#endif
4144 {
4145 /*
4146 * The run loop. We limit ourselves to the caller-specified cMaxInstructions.
4147 */
4148 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
4149 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4150 for (;;)
4151 {
4152 /*
4153 * Log the state.
4154 */
4155#ifdef LOG_ENABLED
4156 iemLogCurInstr(pVCpu, true, "IEMExecLots");
4157#endif
4158
4159 /*
4160 * Do the decoding and emulation.
4161 */
4162 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
4163 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
4164#ifdef VBOX_STRICT
4165 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
4166#endif
4167 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4168 {
4169 Assert(pVCpu->iem.s.cActiveMappings == 0);
4170 pVCpu->iem.s.cInstructions++;
4171
4172#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4173 /* Perform any VMX nested-guest instruction boundary actions. */
4174 uint64_t fCpu = pVCpu->fLocalForcedActions;
4175 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
4176 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
4177 { /* likely */ }
4178 else
4179 {
4180 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
4181 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4182 fCpu = pVCpu->fLocalForcedActions;
4183 else
4184 {
4185 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
4186 break;
4187 }
4188 }
4189#endif
4190 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
4191 {
4192#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4193 uint64_t fCpu = pVCpu->fLocalForcedActions;
4194#endif
4195 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
4196 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
4197 | VMCPU_FF_TLB_FLUSH
4198 | VMCPU_FF_UNHALT );
4199
4200 if (RT_LIKELY( ( !fCpu
4201 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4202 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
4203 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
4204 {
4205 if (--cMaxInstructionsGccStupidity > 0)
4206 {
4207 /* Poll timers every now and then according to the caller's specs. */
4208 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
4209 || !TMTimerPollBool(pVM, pVCpu))
4210 {
4211 Assert(pVCpu->iem.s.cActiveMappings == 0);
4212 iemReInitDecoder(pVCpu);
4213 continue;
4214 }
4215 }
4216 }
4217 }
4218 Assert(pVCpu->iem.s.cActiveMappings == 0);
4219 }
4220 else if (pVCpu->iem.s.cActiveMappings > 0)
4221 iemMemRollback(pVCpu);
4222 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
4223 break;
4224 }
4225 }
4226#ifdef IEM_WITH_SETJMP
4227 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
4228 {
4229 if (pVCpu->iem.s.cActiveMappings > 0)
4230 iemMemRollback(pVCpu);
4231# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
4232 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
4233# endif
4234 pVCpu->iem.s.cLongJumps++;
4235 }
4236 IEM_CATCH_LONGJMP_END(pVCpu);
4237#endif
4238
4239 /*
4240 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
4241 */
4242 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4243 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4244 }
4245 else
4246 {
4247 if (pVCpu->iem.s.cActiveMappings > 0)
4248 iemMemRollback(pVCpu);
4249
4250#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
4251 /*
4252 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
4253 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
4254 */
4255 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
4256#endif
4257 }
4258
4259 /*
4260 * Maybe re-enter raw-mode and log.
4261 */
4262 if (rcStrict != VINF_SUCCESS)
4263 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
4264 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
4265 if (pcInstructions)
4266 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
4267 return rcStrict;
4268}
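/*
 * Usage sketch (example only): cPollRate must be a power of two minus one
 * (see the assertion at the top of IEMExecLots); with 511 the timers get
 * polled roughly every 512 instructions.  The instruction budget here is an
 * illustrative assumption.
 */
#if 0 /* example only */
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
#endif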
4269
4270
4271/**
4272 * Interface used by EMExecuteExec, does exit statistics and limits.
4273 *
4274 * @returns Strict VBox status code.
4275 * @param pVCpu The cross context virtual CPU structure.
4276 * @param fWillExit To be defined.
4277 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
4278 * @param cMaxInstructions Maximum number of instructions to execute.
4279 * @param cMaxInstructionsWithoutExits
4280 * The max number of instructions without exits.
4281 * @param pStats Where to return statistics.
4282 */
4283VMM_INT_DECL(VBOXSTRICTRC)
4284IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
4285 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
4286{
4287 NOREF(fWillExit); /** @todo define flexible exit crits */
4288
4289 /*
4290 * Initialize return stats.
4291 */
4292 pStats->cInstructions = 0;
4293 pStats->cExits = 0;
4294 pStats->cMaxExitDistance = 0;
4295 pStats->cReserved = 0;
4296
4297 /*
4298 * Initial decoder init w/ prefetch, then setup setjmp.
4299 */
4300 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
4301 if (rcStrict == VINF_SUCCESS)
4302 {
4303#ifdef IEM_WITH_SETJMP
4304 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
4305 IEM_TRY_SETJMP(pVCpu, rcStrict)
4306#endif
4307 {
4308#ifdef IN_RING0
4309 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
4310#endif
4311 uint32_t cInstructionSinceLastExit = 0;
4312
4313 /*
4314 * The run loop. We limit ourselves to the caller-specified instruction and exit limits.
4315 */
4316 PVM pVM = pVCpu->CTX_SUFF(pVM);
4317 for (;;)
4318 {
4319 /*
4320 * Log the state.
4321 */
4322#ifdef LOG_ENABLED
4323 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
4324#endif
4325
4326 /*
4327 * Do the decoding and emulation.
4328 */
4329 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
4330
4331 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
4332 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
4333
4334 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
4335 && cInstructionSinceLastExit > 0 /* don't count the first */ )
4336 {
4337 pStats->cExits += 1;
4338 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
4339 pStats->cMaxExitDistance = cInstructionSinceLastExit;
4340 cInstructionSinceLastExit = 0;
4341 }
4342
4343 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4344 {
4345 Assert(pVCpu->iem.s.cActiveMappings == 0);
4346 pVCpu->iem.s.cInstructions++;
4347 pStats->cInstructions++;
4348 cInstructionSinceLastExit++;
4349
4350#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4351 /* Perform any VMX nested-guest instruction boundary actions. */
4352 uint64_t fCpu = pVCpu->fLocalForcedActions;
4353 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
4354 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
4355 { /* likely */ }
4356 else
4357 {
4358 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
4359 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4360 fCpu = pVCpu->fLocalForcedActions;
4361 else
4362 {
4363 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
4364 break;
4365 }
4366 }
4367#endif
4368 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
4369 {
4370#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
4371 uint64_t fCpu = pVCpu->fLocalForcedActions;
4372#endif
4373 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
4374 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
4375 | VMCPU_FF_TLB_FLUSH
4376 | VMCPU_FF_UNHALT );
4377 if (RT_LIKELY( ( ( !fCpu
4378 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4379 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
4380 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
4381 || pStats->cInstructions < cMinInstructions))
4382 {
4383 if (pStats->cInstructions < cMaxInstructions)
4384 {
4385 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
4386 {
4387#ifdef IN_RING0
4388 if ( !fCheckPreemptionPending
4389 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
4390#endif
4391 {
4392 Assert(pVCpu->iem.s.cActiveMappings == 0);
4393 iemReInitDecoder(pVCpu);
4394 continue;
4395 }
4396#ifdef IN_RING0
4397 rcStrict = VINF_EM_RAW_INTERRUPT;
4398 break;
4399#endif
4400 }
4401 }
4402 }
4403 Assert(!(fCpu & VMCPU_FF_IEM));
4404 }
4405 Assert(pVCpu->iem.s.cActiveMappings == 0);
4406 }
4407 else if (pVCpu->iem.s.cActiveMappings > 0)
4408 iemMemRollback(pVCpu);
4409 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
4410 break;
4411 }
4412 }
4413#ifdef IEM_WITH_SETJMP
4414 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
4415 {
4416 if (pVCpu->iem.s.cActiveMappings > 0)
4417 iemMemRollback(pVCpu);
4418 pVCpu->iem.s.cLongJumps++;
4419 }
4420 IEM_CATCH_LONGJMP_END(pVCpu);
4421#endif
4422
4423 /*
4424 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
4425 */
4426 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4427 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4428 }
4429 else
4430 {
4431 if (pVCpu->iem.s.cActiveMappings > 0)
4432 iemMemRollback(pVCpu);
4433
4434#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
4435 /*
4436 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
4437 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
4438 */
4439 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
4440#endif
4441 }
4442
4443 /*
4444 * Maybe re-enter raw-mode and log.
4445 */
4446 if (rcStrict != VINF_SUCCESS)
4447 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
4448 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
4449 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
4450 return rcStrict;
4451}
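/*
 * Usage sketch (example only): all counts below are illustrative assumptions,
 * and the statistics structure name is inferred from the PIEMEXECFOREXITSTATS
 * parameter type.  The call asks for at least 32 instructions before pending
 * force-flags end the run, at most 2048 in total, and a break once 512
 * execute in a row without a potential exit.
 */
#if 0 /* example only */
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit - currently unused*/,
                                            32 /*cMinInstructions*/, 2048 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
#endif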
4452
4453
4454/**
4455 * Injects a trap, fault, abort, software interrupt or external interrupt.
4456 *
4457 * The parameter list matches TRPMQueryTrapAll pretty closely.
4458 *
4459 * @returns Strict VBox status code.
4460 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4461 * @param u8TrapNo The trap number.
4462 * @param enmType What type is it (trap/fault/abort), software
4463 * interrupt or hardware interrupt.
4464 * @param uErrCode The error code if applicable.
4465 * @param uCr2 The CR2 value if applicable.
4466 * @param cbInstr The instruction length (only relevant for
4467 * software interrupts).
4468 * @note x86 specific, but difficult to move due to iemInitDecoder dep.
4469 */
4470VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
4471 uint8_t cbInstr)
4472{
4473 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
4474#ifdef DBGFTRACE_ENABLED
4475 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
4476 u8TrapNo, enmType, uErrCode, uCr2);
4477#endif
4478
4479 uint32_t fFlags;
4480 switch (enmType)
4481 {
4482 case TRPM_HARDWARE_INT:
4483 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
4484 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
4485 uErrCode = uCr2 = 0;
4486 break;
4487
4488 case TRPM_SOFTWARE_INT:
4489 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
4490 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
4491 uErrCode = uCr2 = 0;
4492 break;
4493
4494 case TRPM_TRAP:
4495 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
4496 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
4497 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
4498 if (u8TrapNo == X86_XCPT_PF)
4499 fFlags |= IEM_XCPT_FLAGS_CR2;
4500 switch (u8TrapNo)
4501 {
4502 case X86_XCPT_DF:
4503 case X86_XCPT_TS:
4504 case X86_XCPT_NP:
4505 case X86_XCPT_SS:
4506 case X86_XCPT_PF:
4507 case X86_XCPT_AC:
4508 case X86_XCPT_GP:
4509 fFlags |= IEM_XCPT_FLAGS_ERR;
4510 break;
4511 }
4512 break;
4513
4514 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4515 }
4516
4517 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
4518
4519 if (pVCpu->iem.s.cActiveMappings > 0)
4520 iemMemRollback(pVCpu);
4521
4522 return rcStrict;
4523}
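/*
 * Usage sketch (example only): injecting a page fault.  X86_XCPT_PF gets the
 * IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 flags set by the switch above, so
 * the error code and faulting address are forwarded.  GCPtrFault is an
 * assumed caller local and the error-code bits are just an example
 * combination.
 */
#if 0 /* example only */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                                          X86_TRAP_PF_P | X86_TRAP_PF_RW /*uErrCode*/,
                                          GCPtrFault /*uCr2*/, 0 /*cbInstr*/);
#endif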
4524
4525
4526/**
4527 * Injects the active TRPM event.
4528 *
4529 * @returns Strict VBox status code.
4530 * @param pVCpu The cross context virtual CPU structure.
4531 */
4532VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
4533{
4534#ifndef IEM_IMPLEMENTS_TASKSWITCH
4535 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
4536#else
4537 uint8_t u8TrapNo;
4538 TRPMEVENT enmType;
4539 uint32_t uErrCode;
4540 RTGCUINTPTR uCr2;
4541 uint8_t cbInstr;
4542 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
4543 if (RT_FAILURE(rc))
4544 return rc;
4545
4546 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
4547 * ICEBP \#DB injection as a special case. */
4548 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
4549#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4550 if (rcStrict == VINF_SVM_VMEXIT)
4551 rcStrict = VINF_SUCCESS;
4552#endif
4553#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4554 if (rcStrict == VINF_VMX_VMEXIT)
4555 rcStrict = VINF_SUCCESS;
4556#endif
4557 /** @todo Are there any other codes that imply the event was successfully
4558 * delivered to the guest? See @bugref{6607}. */
4559 if ( rcStrict == VINF_SUCCESS
4560 || rcStrict == VINF_IEM_RAISED_XCPT)
4561 TRPMResetTrap(pVCpu);
4562
4563 return rcStrict;
4564#endif
4565}
4566
4567
4568VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
4569{
4570 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
4571 return VERR_NOT_IMPLEMENTED;
4572}
4573
4574
4575VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
4576{
4577 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
4578 return VERR_NOT_IMPLEMENTED;
4579}
4580
4581#ifdef IN_RING3
4582
4583/**
4584 * Handles the unlikely and probably fatal merge cases.
4585 *
4586 * @returns Merged status code.
4587 * @param rcStrict Current EM status code.
4588 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
4589 * with @a rcStrict.
4590 * @param iMemMap The memory mapping index. For error reporting only.
4591 * @param pVCpu The cross context virtual CPU structure of the calling
4592 * thread, for error reporting only.
4593 */
4594DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
4595 unsigned iMemMap, PVMCPUCC pVCpu)
4596{
4597 if (RT_FAILURE_NP(rcStrict))
4598 return rcStrict;
4599
4600 if (RT_FAILURE_NP(rcStrictCommit))
4601 return rcStrictCommit;
4602
4603 if (rcStrict == rcStrictCommit)
4604 return rcStrictCommit;
4605
4606 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
4607 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
4608 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
4609 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
4610 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
4611 return VERR_IOM_FF_STATUS_IPE;
4612}
4613
4614
4615/**
4616 * Helper for IOMR3ProcessForceFlag.
4617 *
4618 * @returns Merged status code.
4619 * @param rcStrict Current EM status code.
4620 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
4621 * with @a rcStrict.
4622 * @param iMemMap The memory mapping index. For error reporting only.
4623 * @param pVCpu The cross context virtual CPU structure of the calling
4624 * thread, for error reporting only.
4625 */
4626DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
4627{
4628 /* Simple. */
4629 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
4630 return rcStrictCommit;
4631
4632 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
4633 return rcStrict;
4634
4635 /* EM scheduling status codes. */
4636 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
4637 && rcStrict <= VINF_EM_LAST))
4638 {
4639 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
4640 && rcStrictCommit <= VINF_EM_LAST))
4641 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
4642 }
4643
4644 /* Unlikely */
4645 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
4646}
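/*
 * Worked example (illustrative only): with a neutral rcStrict the commit
 * status simply wins; when both inputs are EM scheduling statuses the helper
 * instead returns the numerically smaller (i.e. higher priority) of the two.
 */
#if 0 /* example only */
    VBOXSTRICTRC rcMerged = iemR3MergeStatus(VINF_SUCCESS /*rcStrict*/, VINF_EM_RESCHEDULE /*rcStrictCommit*/,
                                             0 /*iMemMap*/, pVCpu);
    Assert(rcMerged == VINF_EM_RESCHEDULE);
#endif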
4647
4648
4649/**
4650 * Called by force-flag handling code when VMCPU_FF_IEM is set.
4651 *
4652 * @returns Merge between @a rcStrict and what the commit operation returned.
4653 * @param pVM The cross context VM structure.
4654 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4655 * @param rcStrict The status code returned by ring-0 or raw-mode.
4656 */
4657VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
4658{
4659 /*
4660 * Reset the pending commit.
4661 */
4662 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
4663 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
4664 ("%#x %#x %#x\n",
4665 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
4666 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
4667
4668 /*
4669 * Commit the pending bounce buffers (usually just one).
4670 */
4671 unsigned cBufs = 0;
4672 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
4673 while (iMemMap-- > 0)
4674 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
4675 {
4676 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
4677 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
4678 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
4679
4680 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
4681 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
4682 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
4683
4684 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
4685 {
4686 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
4687 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
4688 pbBuf,
4689 cbFirst,
4690 PGMACCESSORIGIN_IEM);
4691 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
4692 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
4693 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
4694 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
4695 }
4696
4697 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
4698 {
4699 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
4700 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
4701 pbBuf + cbFirst,
4702 cbSecond,
4703 PGMACCESSORIGIN_IEM);
4704 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
4705 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
4706 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
4707 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
4708 }
4709 cBufs++;
4710 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
4711 }
4712
4713 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
4714 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
4715 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
4716 pVCpu->iem.s.cActiveMappings = 0;
4717 return rcStrict;
4718}
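/*
 * Caller-side sketch (example only): ring-3 code that notices VMCPU_FF_IEM
 * after returning from ring-0/raw-mode hands its status to
 * IEMR3ProcessForceFlag so the pending bounce-buffer writes get committed and
 * the commit status merged into the one it already has.
 */
#if 0 /* example only */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif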
4719
4720#endif /* IN_RING3 */
4721