VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@105172

Last change on this file since 105172 was 105137, checked in by vboxsync, 7 months ago

VMM/IEM: Logging fix. bugref:10715

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 451.2 KB
1/* $Id: IEMAll.cpp 105137 2024-07-04 09:12:44Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
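/*
 * Example (illustrative sketch only): how the level assignments above map onto
 * the VBox logging macros once LOG_GROUP is defined as LOG_GROUP_IEM (done
 * further down in this file). The messages and variables below are made up
 * purely for illustration:
 *
 *      Log(("raising #GP(0) for ...\n"));                        // level 1: errors/exceptions
 *      Log4(("decode: %04x:%08RX64 add eax, ebx\n", uCs, uRip)); // level 4: mnemonics w/ EIP
 *      Log10(("data TLB miss for %RGv\n", GCPtrMem));            // level 10: TLBs
 *      LogFlow(("IEMExecOne: enter\n"));                         // flow: enter/exit state info
 */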
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
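 *
 * Typical classification per the Intel/AMD manuals: vectors 0 (\#DE), 10 (\#TS),
 * 11 (\#NP), 12 (\#SS) and 13 (\#GP) are contributory, 14 (\#PF) forms the page
 * fault class, 8 (\#DF) the double fault class, and the remaining vectors are
 * benign.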
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
215 * Helper for invalidating the data TLB entries for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
225 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
226 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
227 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
228 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
229 } while (0)
230#else
231# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
232#endif
233
234 /*
235 * Process guest breakpoints.
236 */
237#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
238 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
239 { \
240 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
241 { \
242 case X86_DR7_RW_EO: \
243 fExec |= IEM_F_PENDING_BRK_INSTR; \
244 break; \
245 case X86_DR7_RW_WO: \
246 case X86_DR7_RW_RW: \
247 fExec |= IEM_F_PENDING_BRK_DATA; \
248 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
249 break; \
250 case X86_DR7_RW_IO: \
251 fExec |= IEM_F_PENDING_BRK_X86_IO; \
252 break; \
253 } \
254 } \
255 } while (0)
256
257 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
258 if (fGstDr7 & X86_DR7_ENABLED_MASK)
259 {
260/** @todo extract more details here to simplify matching later. */
261#ifdef IEM_WITH_DATA_TLB
262 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
263#endif
264 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
265 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
266 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
267 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
268 }
269
270 /*
271 * Process hypervisor breakpoints.
272 */
273 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
274 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
275 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
276 {
277/** @todo extract more details here to simplify matching later. */
278 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
279 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
282 }
283
284 return fExec;
285}
286
287
288/**
289 * Initializes the decoder state.
290 *
291 * iemReInitDecoder is mostly a copy of this function.
292 *
293 * @param pVCpu The cross context virtual CPU structure of the
294 * calling thread.
295 * @param fExecOpts Optional execution flags:
296 * - IEM_F_BYPASS_HANDLERS
297 * - IEM_F_X86_DISREGARD_LOCK
298 */
299DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
300{
301 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
302 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
311
312 /* Execution state: */
313 uint32_t fExec;
314 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
315
316 /* Decoder state: */
317 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
318 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
319 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
320 {
321 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
322 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
323 }
324 else
325 {
326 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
327 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
328 }
329 pVCpu->iem.s.fPrefixes = 0;
330 pVCpu->iem.s.uRexReg = 0;
331 pVCpu->iem.s.uRexB = 0;
332 pVCpu->iem.s.uRexIndex = 0;
333 pVCpu->iem.s.idxPrefix = 0;
334 pVCpu->iem.s.uVex3rdReg = 0;
335 pVCpu->iem.s.uVexLength = 0;
336 pVCpu->iem.s.fEvexStuff = 0;
337 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
338#ifdef IEM_WITH_CODE_TLB
339 pVCpu->iem.s.pbInstrBuf = NULL;
340 pVCpu->iem.s.offInstrNextByte = 0;
341 pVCpu->iem.s.offCurInstrStart = 0;
342# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
343 pVCpu->iem.s.offOpcode = 0;
344# endif
345# ifdef VBOX_STRICT
346 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
347 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
348 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
349 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
350# endif
351#else
352 pVCpu->iem.s.offOpcode = 0;
353 pVCpu->iem.s.cbOpcode = 0;
354#endif
355 pVCpu->iem.s.offModRm = 0;
356 pVCpu->iem.s.cActiveMappings = 0;
357 pVCpu->iem.s.iNextMapping = 0;
358 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
359
360#ifdef DBGFTRACE_ENABLED
361 switch (IEM_GET_CPU_MODE(pVCpu))
362 {
363 case IEMMODE_64BIT:
364 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
365 break;
366 case IEMMODE_32BIT:
367 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
368 break;
369 case IEMMODE_16BIT:
370 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
371 break;
372 }
373#endif
374}
375
376
377/**
378 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
379 *
380 * This is mostly a copy of iemInitDecoder.
381 *
382 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
383 */
384DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
385{
386 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
387 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
395
396 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
397 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
398 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
399
400 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
401 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
402 pVCpu->iem.s.enmEffAddrMode = enmMode;
403 if (enmMode != IEMMODE_64BIT)
404 {
405 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
406 pVCpu->iem.s.enmEffOpSize = enmMode;
407 }
408 else
409 {
410 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
411 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
412 }
413 pVCpu->iem.s.fPrefixes = 0;
414 pVCpu->iem.s.uRexReg = 0;
415 pVCpu->iem.s.uRexB = 0;
416 pVCpu->iem.s.uRexIndex = 0;
417 pVCpu->iem.s.idxPrefix = 0;
418 pVCpu->iem.s.uVex3rdReg = 0;
419 pVCpu->iem.s.uVexLength = 0;
420 pVCpu->iem.s.fEvexStuff = 0;
421 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
422#ifdef IEM_WITH_CODE_TLB
423 if (pVCpu->iem.s.pbInstrBuf)
424 {
425 uint64_t off = (enmMode == IEMMODE_64BIT
426 ? pVCpu->cpum.GstCtx.rip
427 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
428 - pVCpu->iem.s.uInstrBufPc;
429 if (off < pVCpu->iem.s.cbInstrBufTotal)
430 {
431 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
432 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
433 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
434 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
435 else
436 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
437 }
438 else
439 {
440 pVCpu->iem.s.pbInstrBuf = NULL;
441 pVCpu->iem.s.offInstrNextByte = 0;
442 pVCpu->iem.s.offCurInstrStart = 0;
443 pVCpu->iem.s.cbInstrBuf = 0;
444 pVCpu->iem.s.cbInstrBufTotal = 0;
445 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
446 }
447 }
448 else
449 {
450 pVCpu->iem.s.offInstrNextByte = 0;
451 pVCpu->iem.s.offCurInstrStart = 0;
452 pVCpu->iem.s.cbInstrBuf = 0;
453 pVCpu->iem.s.cbInstrBufTotal = 0;
454# ifdef VBOX_STRICT
455 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
456# endif
457 }
458# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
459 pVCpu->iem.s.offOpcode = 0;
460# endif
461#else /* !IEM_WITH_CODE_TLB */
462 pVCpu->iem.s.cbOpcode = 0;
463 pVCpu->iem.s.offOpcode = 0;
464#endif /* !IEM_WITH_CODE_TLB */
465 pVCpu->iem.s.offModRm = 0;
466 Assert(pVCpu->iem.s.cActiveMappings == 0);
467 pVCpu->iem.s.iNextMapping = 0;
468 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
469 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
470
471#ifdef DBGFTRACE_ENABLED
472 switch (enmMode)
473 {
474 case IEMMODE_64BIT:
475 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
476 break;
477 case IEMMODE_32BIT:
478 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
479 break;
480 case IEMMODE_16BIT:
481 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
482 break;
483 }
484#endif
485}
486
487
488
489/**
490 * Prefetches opcodes the first time when starting execution.
491 *
492 * @returns Strict VBox status code.
493 * @param pVCpu The cross context virtual CPU structure of the
494 * calling thread.
495 * @param fExecOpts Optional execution flags:
496 * - IEM_F_BYPASS_HANDLERS
497 * - IEM_F_X86_DISREGARD_LOCK
498 */
499static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
500{
501 iemInitDecoder(pVCpu, fExecOpts);
502
503#ifndef IEM_WITH_CODE_TLB
504 /*
505 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
506 *
507 * First translate CS:rIP to a physical address.
508 *
509 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
510 * all relevant bytes from the first page, as it ASSUMES it's only ever
511 * called for dealing with CS.LIM, page crossing and instructions that
512 * are too long.
513 */
514 uint32_t cbToTryRead;
515 RTGCPTR GCPtrPC;
516 if (IEM_IS_64BIT_CODE(pVCpu))
517 {
518 cbToTryRead = GUEST_PAGE_SIZE;
519 GCPtrPC = pVCpu->cpum.GstCtx.rip;
520 if (IEM_IS_CANONICAL(GCPtrPC))
521 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
522 else
523 return iemRaiseGeneralProtectionFault0(pVCpu);
524 }
525 else
526 {
527 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
528 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
529 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
530 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
531 else
532 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
533 if (cbToTryRead) { /* likely */ }
534 else /* overflowed */
535 {
536 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
537 cbToTryRead = UINT32_MAX;
538 }
539 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
540 Assert(GCPtrPC <= UINT32_MAX);
541 }
542
543 PGMPTWALKFAST WalkFast;
544 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
545 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
546 &WalkFast);
547 if (RT_SUCCESS(rc))
548 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
549 else
550 {
551 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
552# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
553/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
554 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
555 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
556 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
557# endif
558 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
559 }
560#if 0
561 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
562 else
563 {
564 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
565# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
566/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
567# error completely wrong
568 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
569 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
570# endif
571 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
572 }
573 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
574 else
575 {
576 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
577# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
578/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
579# error completely wrong.
580 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
581 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
582# endif
583 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
584 }
585#else
586 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
587 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
588#endif
589 RTGCPHYS const GCPhys = WalkFast.GCPhys;
590
591 /*
592 * Read the bytes at this address.
593 */
594 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
595 if (cbToTryRead > cbLeftOnPage)
596 cbToTryRead = cbLeftOnPage;
597 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
598 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
599
600 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
601 {
602 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
603 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
604 { /* likely */ }
605 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
606 {
607 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
608 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
609 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
610 }
611 else
612 {
613 Log((RT_SUCCESS(rcStrict)
614 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
615 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
616 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
617 return rcStrict;
618 }
619 }
620 else
621 {
622 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
623 if (RT_SUCCESS(rc))
624 { /* likely */ }
625 else
626 {
627 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
628 GCPtrPC, GCPhys, cbToTryRead, rc));
629 return rc;
630 }
631 }
632 pVCpu->iem.s.cbOpcode = cbToTryRead;
633#endif /* !IEM_WITH_CODE_TLB */
634 return VINF_SUCCESS;
635}
636
637
638#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
639/**
640 * Worker for iemTlbInvalidateAll.
641 */
642template<bool a_fGlobal>
643DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
644{
645 if (!a_fGlobal)
646 pTlb->cTlsFlushes++;
647 else
648 pTlb->cTlsGlobalFlushes++;
649
650 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
651 if (RT_LIKELY(pTlb->uTlbRevision != 0))
652 { /* very likely */ }
653 else
654 {
655 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
656 pTlb->cTlbRevisionRollovers++;
657 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
658 while (i-- > 0)
659 pTlb->aEntries[i * 2].uTag = 0;
660 }
661 if (a_fGlobal)
662 {
663 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
664 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
665 { /* very likely */ }
666 else
667 {
668 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
669 pTlb->cTlbRevisionRollovers++;
670 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
671 while (i-- > 0)
672 pTlb->aEntries[i * 2 + 1].uTag = 0;
673 }
674 }
675}
676#endif
677
678
679/**
680 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
681 */
682template<bool a_fGlobal>
683DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
684{
685#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
686 Log10(("IEMTlbInvalidateAll\n"));
687
688# ifdef IEM_WITH_CODE_TLB
689 pVCpu->iem.s.cbInstrBufTotal = 0;
690 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
691# endif
692
693# ifdef IEM_WITH_DATA_TLB
694 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
695# endif
696#else
697 RT_NOREF(pVCpu);
698#endif
699}
700
701
702/**
703 * Invalidates the non-global IEM TLB entries.
704 *
705 * This is called internally as well as by PGM when moving GC mappings.
706 *
707 * @param pVCpu The cross context virtual CPU structure of the calling
708 * thread.
709 */
710VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
711{
712 iemTlbInvalidateAll<false>(pVCpu);
713}
714
715
716/**
717 * Invalidates all the IEM TLB entries.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 */
724VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
725{
726 iemTlbInvalidateAll<true>(pVCpu);
727}
728
729
730/**
731 * Invalidates a page in the TLBs.
732 *
733 * @param pVCpu The cross context virtual CPU structure of the calling
734 * thread.
735 * @param GCPtr The address of the page to invalidate
736 * @thread EMT(pVCpu)
737 */
738VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
739{
740#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
741 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
742 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
743 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
744 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
745
746# ifdef IEM_WITH_CODE_TLB
747 if (pVCpu->iem.s.CodeTlb.aEntries[idxEven].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
748 {
749 pVCpu->iem.s.CodeTlb.aEntries[idxEven].uTag = 0;
750 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
751 pVCpu->iem.s.cbInstrBufTotal = 0;
752 }
753 if (pVCpu->iem.s.CodeTlb.aEntries[idxEven + 1].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
754 {
755 pVCpu->iem.s.CodeTlb.aEntries[idxEven + 1].uTag = 0;
756 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
757 pVCpu->iem.s.cbInstrBufTotal = 0;
758 }
759# endif
760
761# ifdef IEM_WITH_DATA_TLB
762 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
763 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0;
764 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal))
765 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0;
766# endif
767#else
768 NOREF(pVCpu); NOREF(GCPtr);
769#endif
770}
771
772
773#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
774/**
775 * Invalidates both TLBs in slow fashion following a rollover.
776 *
777 * Worker for IEMTlbInvalidateAllPhysical,
778 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
779 * iemMemMapJmp and others.
780 *
781 * @thread EMT(pVCpu)
782 */
783static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
784{
785 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
786 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
787 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
788
789 unsigned i;
790# ifdef IEM_WITH_CODE_TLB
791 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
792 while (i-- > 0)
793 {
794 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
795 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
796 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
797 }
798 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
799 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
800# endif
801# ifdef IEM_WITH_DATA_TLB
802 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
803 while (i-- > 0)
804 {
805 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
806 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
807 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
808 }
809 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
810 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
811# endif
812
813}
814#endif
815
816
817/**
818 * Invalidates the host physical aspects of the IEM TLBs.
819 *
820 * This is called internally as well as by PGM when moving GC mappings.
821 *
822 * @param pVCpu The cross context virtual CPU structure of the calling
823 * thread.
824 * @note Currently not used.
825 */
826VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
827{
828#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
829 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
830 Log10(("IEMTlbInvalidateAllPhysical\n"));
831
832# ifdef IEM_WITH_CODE_TLB
833 pVCpu->iem.s.cbInstrBufTotal = 0;
834# endif
835 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
836 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
837 {
838 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
839 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
840 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
841 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
842 }
843 else
844 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
845#else
846 NOREF(pVCpu);
847#endif
848}
849
850
851/**
852 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
853 *
854 * This is called internally as well as by PGM when moving GC mappings.
855 *
856 * @param pVM The cross context VM structure.
857 * @param idCpuCaller The ID of the calling EMT if available to the caller,
858 * otherwise NIL_VMCPUID.
859 * @param enmReason The reason we're called.
860 *
861 * @remarks Caller holds the PGM lock.
862 */
863VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
864{
865#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
866 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
867 if (pVCpuCaller)
868 VMCPU_ASSERT_EMT(pVCpuCaller);
869 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
870
871 VMCC_FOR_EACH_VMCPU(pVM)
872 {
873# ifdef IEM_WITH_CODE_TLB
874 if (pVCpuCaller == pVCpu)
875 pVCpu->iem.s.cbInstrBufTotal = 0;
876# endif
877
878 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
879 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
880 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
881 { /* likely */}
882 else if (pVCpuCaller != pVCpu)
883 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
884 else
885 {
886 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
887 continue;
888 }
889 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
890 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
891
892 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
893 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
894 }
895 VMCC_FOR_EACH_VMCPU_END(pVM);
896
897#else
898 RT_NOREF(pVM, idCpuCaller, enmReason);
899#endif
900}
901
902
903/**
904 * Flushes the prefetch buffer, light version.
905 */
906void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
907{
908#ifndef IEM_WITH_CODE_TLB
909 pVCpu->iem.s.cbOpcode = cbInstr;
910#else
911 RT_NOREF(pVCpu, cbInstr);
912#endif
913}
914
915
916/**
917 * Flushes the prefetch buffer, heavy version.
918 */
919void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
920{
921#ifndef IEM_WITH_CODE_TLB
922 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
923#elif 1
924 pVCpu->iem.s.cbInstrBufTotal = 0;
925 RT_NOREF(cbInstr);
926#else
927 RT_NOREF(pVCpu, cbInstr);
928#endif
929}
930
931
932
933#ifdef IEM_WITH_CODE_TLB
934
935/**
936 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
937 * failure and jumps.
938 *
939 * We end up here for a number of reasons:
940 * - pbInstrBuf isn't yet initialized.
941 * - Advancing beyond the buffer boundary (e.g. cross page).
942 * - Advancing beyond the CS segment limit.
943 * - Fetching from non-mappable page (e.g. MMIO).
944 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
945 *
946 * @param pVCpu The cross context virtual CPU structure of the
947 * calling thread.
948 * @param pvDst Where to return the bytes.
949 * @param cbDst Number of bytes to read. A value of zero is
950 * allowed for initializing pbInstrBuf (the
951 * recompiler does this). In this case it is best
952 * to set pbInstrBuf to NULL prior to the call.
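 *
 * Example (sketch): a pure TLB load as done by the recompiler - no opcode
 * bytes are copied, only pbInstrBuf and friends get (re)initialized:
 * @code
 *      pVCpu->iem.s.pbInstrBuf = NULL;
 *      iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);   // cbDst=0, pvDst=NULL
 * @endcode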
953 */
954void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
955{
956# ifdef IN_RING3
957 for (;;)
958 {
959 Assert(cbDst <= 8);
960 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
961
962 /*
963 * We might have a partial buffer match, deal with that first to make the
964 * rest simpler. This is the first part of the cross page/buffer case.
965 */
966 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
967 if (pbInstrBuf != NULL)
968 {
969 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
970 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
971 if (offBuf < cbInstrBuf)
972 {
973 Assert(offBuf + cbDst > cbInstrBuf);
974 uint32_t const cbCopy = cbInstrBuf - offBuf;
975 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
976
977 cbDst -= cbCopy;
978 pvDst = (uint8_t *)pvDst + cbCopy;
979 offBuf += cbCopy;
980 }
981 }
982
983 /*
984 * Check segment limit, figuring how much we're allowed to access at this point.
985 *
986 * We will fault immediately if RIP is past the segment limit / in non-canonical
987 * territory. If we do continue, there are one or more bytes to read before we
988 * end up in trouble and we need to do that first before faulting.
989 */
990 RTGCPTR GCPtrFirst;
991 uint32_t cbMaxRead;
992 if (IEM_IS_64BIT_CODE(pVCpu))
993 {
994 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
995 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
996 { /* likely */ }
997 else
998 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
999 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1000 }
1001 else
1002 {
1003 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1004 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1005 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1006 { /* likely */ }
1007 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1008 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1009 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1010 if (cbMaxRead != 0)
1011 { /* likely */ }
1012 else
1013 {
1014 /* Overflowed because address is 0 and limit is max. */
1015 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1016 cbMaxRead = X86_PAGE_SIZE;
1017 }
1018 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1019 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1020 if (cbMaxRead2 < cbMaxRead)
1021 cbMaxRead = cbMaxRead2;
1022 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1023 }
1024
1025 /*
1026 * Get the TLB entry for this piece of code.
1027 */
1028 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1029 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1030 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1031 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1032 {
1033 /* likely when executing lots of code, otherwise unlikely */
1034# ifdef IEM_WITH_TLB_STATISTICS
1035 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1036# endif
1037 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1038
1039 /* Check TLB page table level access flags. */
1040 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1041 {
1042 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1043 {
1044 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1045 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1046 }
1047 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1048 {
1049 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1050 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1051 }
1052 }
1053
1054 /* Look up the physical page info if necessary. */
1055 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1056 { /* not necessary */ }
1057 else
1058 {
1059 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1060 { /* likely */ }
1061 else
1062 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1063 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1064 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1065 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1066 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1067 }
1068 }
1069 else
1070 {
1071 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1072
1073 /* This page table walking will set A bits as required by the access while performing the walk.
1074 ASSUMES these are set when the address is translated rather than on commit... */
1075 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1076 PGMPTWALKFAST WalkFast;
1077 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1078 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1079 &WalkFast);
1080 if (RT_SUCCESS(rc))
1081 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1082 else
1083 {
1084#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1085 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1086 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1087#endif
1088 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1089 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1090 }
1091
1092 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1093 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1094 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1095 {
1096 pTlbe--;
1097 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1098 }
1099 else
1100 {
1101 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1102 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1103 }
1104 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1105 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/;
1106 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1107 pTlbe->GCPhys = GCPhysPg;
1108 pTlbe->pbMappingR3 = NULL;
1109 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1110 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1111 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1112
1113 /* Resolve the physical address. */
1114 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1115 { /* likely */ }
1116 else
1117 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1118 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1119 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1120 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1121 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1122 }
1123
1124# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1125 /*
1126 * Try do a direct read using the pbMappingR3 pointer.
1127 * Note! Do not recheck the physical TLB revision number here as we have the
1128 * wrong response to changes in the else case. If someone is updating
1129 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1130 * pretending we always won the race.
1131 */
1132 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1133 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1134 {
1135 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1136 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1137 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1138 {
1139 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1140 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1141 }
1142 else
1143 {
1144 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1145 if (cbInstr + (uint32_t)cbDst <= 15)
1146 {
1147 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1148 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1149 }
1150 else
1151 {
1152 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1153 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1154 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1155 }
1156 }
1157 if (cbDst <= cbMaxRead)
1158 {
1159 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1160 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1161
1162 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1163 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1164 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1165 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1166 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1167 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1168 else
1169 Assert(!pvDst);
1170 return;
1171 }
1172 pVCpu->iem.s.pbInstrBuf = NULL;
1173
1174 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1175 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1176 }
1177# else
1178# error "refactor as needed"
1179 /*
1180 * If there is no special read handling, we can read a bit more and
1181 * put it in the prefetch buffer.
1182 */
1183 if ( cbDst < cbMaxRead
1184 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1185 {
1186 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1187 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1188 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1189 { /* likely */ }
1190 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1191 {
1192 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1193 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1194 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1195 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1196 }
1197 else
1198 {
1199 Log((RT_SUCCESS(rcStrict)
1200 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1201 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1202 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1203 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1204 }
1205 }
1206# endif
1207 /*
1208 * Special read handling, so only read exactly what's needed.
1209 * This is a highly unlikely scenario.
1210 */
1211 else
1212 {
1213 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1214
1215 /* Check instruction length. */
1216 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1217 if (RT_LIKELY(cbInstr + cbDst <= 15))
1218 { /* likely */ }
1219 else
1220 {
1221 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1222 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1223 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1224 }
1225
1226 /* Do the reading. */
1227 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1228 if (cbToRead > 0)
1229 {
1230 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1231 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1232 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1233 { /* likely */ }
1234 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1235 {
1236 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1237 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1238 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1239 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1240 }
1241 else
1242 {
1243 Log((RT_SUCCESS(rcStrict)
1244 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1245 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1246 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1247 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1248 }
1249 }
1250
1251 /* Update the state and probably return. */
1252 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1253 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1254 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1255
1256 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1257 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1258 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1259 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1260 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1261 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1262 pVCpu->iem.s.pbInstrBuf = NULL;
1263 if (cbToRead == cbDst)
1264 return;
1265 Assert(cbToRead == cbMaxRead);
1266 }
1267
1268 /*
1269 * More to read, loop.
1270 */
1271 cbDst -= cbMaxRead;
1272 pvDst = (uint8_t *)pvDst + cbMaxRead;
1273 }
1274# else /* !IN_RING3 */
1275 RT_NOREF(pvDst, cbDst);
1276 if (pvDst || cbDst)
1277 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1278# endif /* !IN_RING3 */
1279}
1280
1281#else /* !IEM_WITH_CODE_TLB */
1282
1283/**
1284 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1285 * exception if it fails.
1286 *
1287 * @returns Strict VBox status code.
1288 * @param pVCpu The cross context virtual CPU structure of the
1289 * calling thread.
1290 * @param cbMin The minimum number of bytes relative to offOpcode
1291 * that must be read.
1292 */
1293VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1294{
1295 /*
1296 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1297 *
1298 * First translate CS:rIP to a physical address.
1299 */
1300 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1301 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1302 uint8_t const cbLeft = cbOpcode - offOpcode;
1303 Assert(cbLeft < cbMin);
1304 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1305
1306 uint32_t cbToTryRead;
1307 RTGCPTR GCPtrNext;
1308 if (IEM_IS_64BIT_CODE(pVCpu))
1309 {
1310 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1311 if (!IEM_IS_CANONICAL(GCPtrNext))
1312 return iemRaiseGeneralProtectionFault0(pVCpu);
1313 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1314 }
1315 else
1316 {
1317 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1318 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1319 GCPtrNext32 += cbOpcode;
1320 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1321 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1322 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1323 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1324 if (!cbToTryRead) /* overflowed */
1325 {
1326 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1327 cbToTryRead = UINT32_MAX;
1328 /** @todo check out wrapping around the code segment. */
1329 }
1330 if (cbToTryRead < cbMin - cbLeft)
1331 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1332 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1333
1334 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1335 if (cbToTryRead > cbLeftOnPage)
1336 cbToTryRead = cbLeftOnPage;
1337 }
1338
1339 /* Restrict to opcode buffer space.
1340
1341 We're making ASSUMPTIONS here based on work done previously in
1342 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1343 be fetched in case of an instruction crossing two pages. */
1344 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1345 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1346 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1347 { /* likely */ }
1348 else
1349 {
1350 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1351 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1352 return iemRaiseGeneralProtectionFault0(pVCpu);
1353 }
1354
1355 PGMPTWALKFAST WalkFast;
1356 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1357 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1358 &WalkFast);
1359 if (RT_SUCCESS(rc))
1360 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1361 else
1362 {
1363 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1364#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1365 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1366 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1367#endif
1368 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1369 }
1370 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1371 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1372
1373 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1374 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1375
1376 /*
1377 * Read the bytes at this address.
1378 *
1379 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1380 * and since PATM should only patch the start of an instruction there
1381 * should be no need to check again here.
1382 */
1383 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1384 {
1385 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1386 cbToTryRead, PGMACCESSORIGIN_IEM);
1387 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1388 { /* likely */ }
1389 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1390 {
1391 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1392 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1393 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1394 }
1395 else
1396 {
1397 Log((RT_SUCCESS(rcStrict)
1398 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1399 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1400 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1401 return rcStrict;
1402 }
1403 }
1404 else
1405 {
1406 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1407 if (RT_SUCCESS(rc))
1408 { /* likely */ }
1409 else
1410 {
1411 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1412 return rc;
1413 }
1414 }
1415 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1416 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1417
1418 return VINF_SUCCESS;
1419}
1420
1421#endif /* !IEM_WITH_CODE_TLB */
1422#ifndef IEM_WITH_SETJMP
1423
1424/**
1425 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1426 *
1427 * @returns Strict VBox status code.
1428 * @param pVCpu The cross context virtual CPU structure of the
1429 * calling thread.
1430 * @param pb Where to return the opcode byte.
1431 */
1432VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1433{
1434 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1435 if (rcStrict == VINF_SUCCESS)
1436 {
1437 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1438 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1439 pVCpu->iem.s.offOpcode = offOpcode + 1;
1440 }
1441 else
1442 *pb = 0;
1443 return rcStrict;
1444}
1445
1446#else /* IEM_WITH_SETJMP */
1447
1448/**
1449 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1450 *
1451 * @returns The opcode byte.
1452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1453 */
1454uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1455{
1456# ifdef IEM_WITH_CODE_TLB
1457 uint8_t u8;
1458 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1459 return u8;
1460# else
1461 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1462 if (rcStrict == VINF_SUCCESS)
1463 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1464 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1465# endif
1466}
1467
1468#endif /* IEM_WITH_SETJMP */
1469
1470#ifndef IEM_WITH_SETJMP
1471
1472/**
1473 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1474 *
1475 * @returns Strict VBox status code.
1476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1477 * @param pu16 Where to return the opcode word.
1478 */
1479VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1480{
1481 uint8_t u8;
1482 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1483 if (rcStrict == VINF_SUCCESS)
1484 *pu16 = (int8_t)u8;
1485 return rcStrict;
1486}
1487
1488
1489/**
1490 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1491 *
1492 * @returns Strict VBox status code.
1493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1494 * @param pu32 Where to return the opcode dword.
1495 */
1496VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1497{
1498 uint8_t u8;
1499 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1500 if (rcStrict == VINF_SUCCESS)
1501 *pu32 = (int8_t)u8;
1502 return rcStrict;
1503}
1504
1505
1506/**
1507 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1508 *
1509 * @returns Strict VBox status code.
1510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1511 * @param pu64 Where to return the opcode qword.
1512 */
1513VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1514{
1515 uint8_t u8;
1516 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1517 if (rcStrict == VINF_SUCCESS)
1518 *pu64 = (int8_t)u8;
1519 return rcStrict;
1520}
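/* Note: the three S8Sx helpers above all sign-extend a single opcode byte, e.g. the byte
   0xf0 becomes 0xfff0, 0xfffffff0 and 0xfffffffffffffff0 for the U16, U32 and U64 variants. */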
1521
1522#endif /* !IEM_WITH_SETJMP */
1523
1524
1525#ifndef IEM_WITH_SETJMP
1526
1527/**
1528 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1529 *
1530 * @returns Strict VBox status code.
1531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1532 * @param pu16 Where to return the opcode word.
1533 */
1534VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1535{
1536 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1537 if (rcStrict == VINF_SUCCESS)
1538 {
1539 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1540# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1541 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1542# else
1543 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1544# endif
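    /* Note: RT_MAKE_U16(lo, hi) assembles the word from the two opcode bytes in little-endian
       order, matching what the unaligned 16-bit read above yields on a little-endian host. */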
1545 pVCpu->iem.s.offOpcode = offOpcode + 2;
1546 }
1547 else
1548 *pu16 = 0;
1549 return rcStrict;
1550}
1551
1552#else /* IEM_WITH_SETJMP */
1553
1554/**
1555 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1556 *
1557 * @returns The opcode word.
1558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1559 */
1560uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1561{
1562# ifdef IEM_WITH_CODE_TLB
1563 uint16_t u16;
1564 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1565 return u16;
1566# else
1567 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1568 if (rcStrict == VINF_SUCCESS)
1569 {
1570 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1571 pVCpu->iem.s.offOpcode += 2;
1572# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1573 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1574# else
1575 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1576# endif
1577 }
1578 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1579# endif
1580}
1581
1582#endif /* IEM_WITH_SETJMP */
1583
1584#ifndef IEM_WITH_SETJMP
1585
1586/**
1587 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1588 *
1589 * @returns Strict VBox status code.
1590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1591 * @param pu32 Where to return the opcode double word.
1592 */
1593VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1594{
1595 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1596 if (rcStrict == VINF_SUCCESS)
1597 {
1598 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1599 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1600 pVCpu->iem.s.offOpcode = offOpcode + 2;
1601 }
1602 else
1603 *pu32 = 0;
1604 return rcStrict;
1605}
1606
1607
1608/**
1609 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1610 *
1611 * @returns Strict VBox status code.
1612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1613 * @param pu64 Where to return the opcode quad word.
1614 */
1615VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1616{
1617 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1618 if (rcStrict == VINF_SUCCESS)
1619 {
1620 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1621 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1622 pVCpu->iem.s.offOpcode = offOpcode + 2;
1623 }
1624 else
1625 *pu64 = 0;
1626 return rcStrict;
1627}
1628
1629#endif /* !IEM_WITH_SETJMP */
1630
1631#ifndef IEM_WITH_SETJMP
1632
1633/**
1634 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1635 *
1636 * @returns Strict VBox status code.
1637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1638 * @param pu32 Where to return the opcode dword.
1639 */
1640VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1641{
1642 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1643 if (rcStrict == VINF_SUCCESS)
1644 {
1645 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1646# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1647 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1648# else
1649 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1650 pVCpu->iem.s.abOpcode[offOpcode + 1],
1651 pVCpu->iem.s.abOpcode[offOpcode + 2],
1652 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1653# endif
1654 pVCpu->iem.s.offOpcode = offOpcode + 4;
1655 }
1656 else
1657 *pu32 = 0;
1658 return rcStrict;
1659}
1660
1661#else /* IEM_WITH_SETJMP */
1662
1663/**
1664 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1665 *
1666 * @returns The opcode dword.
1667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1668 */
1669uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1670{
1671# ifdef IEM_WITH_CODE_TLB
1672 uint32_t u32;
1673 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1674 return u32;
1675# else
1676 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1677 if (rcStrict == VINF_SUCCESS)
1678 {
1679 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1680 pVCpu->iem.s.offOpcode = offOpcode + 4;
1681# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1682 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1683# else
1684 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1685 pVCpu->iem.s.abOpcode[offOpcode + 1],
1686 pVCpu->iem.s.abOpcode[offOpcode + 2],
1687 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1688# endif
1689 }
1690 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1691# endif
1692}
1693
1694#endif /* IEM_WITH_SETJMP */
1695
1696#ifndef IEM_WITH_SETJMP
1697
1698/**
1699 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1700 *
1701 * @returns Strict VBox status code.
1702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1703 * @param pu64 Where to return the opcode dword, zero-extended to a qword.
1704 */
1705VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1706{
1707 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1708 if (rcStrict == VINF_SUCCESS)
1709 {
1710 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1711 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1712 pVCpu->iem.s.abOpcode[offOpcode + 1],
1713 pVCpu->iem.s.abOpcode[offOpcode + 2],
1714 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1715 pVCpu->iem.s.offOpcode = offOpcode + 4;
1716 }
1717 else
1718 *pu64 = 0;
1719 return rcStrict;
1720}
1721
1722
1723/**
1724 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1725 *
1726 * @returns Strict VBox status code.
1727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1728 * @param pu64 Where to return the opcode qword.
1729 */
1730VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1731{
1732 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1733 if (rcStrict == VINF_SUCCESS)
1734 {
1735 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1736 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1737 pVCpu->iem.s.abOpcode[offOpcode + 1],
1738 pVCpu->iem.s.abOpcode[offOpcode + 2],
1739 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1740 pVCpu->iem.s.offOpcode = offOpcode + 4;
1741 }
1742 else
1743 *pu64 = 0;
1744 return rcStrict;
1745}
1746
1747#endif /* !IEM_WITH_SETJMP */
1748
1749#ifndef IEM_WITH_SETJMP
1750
1751/**
1752 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1753 *
1754 * @returns Strict VBox status code.
1755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1756 * @param pu64 Where to return the opcode qword.
1757 */
1758VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1759{
1760 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1761 if (rcStrict == VINF_SUCCESS)
1762 {
1763 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1764# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1765 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1766# else
1767 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1768 pVCpu->iem.s.abOpcode[offOpcode + 1],
1769 pVCpu->iem.s.abOpcode[offOpcode + 2],
1770 pVCpu->iem.s.abOpcode[offOpcode + 3],
1771 pVCpu->iem.s.abOpcode[offOpcode + 4],
1772 pVCpu->iem.s.abOpcode[offOpcode + 5],
1773 pVCpu->iem.s.abOpcode[offOpcode + 6],
1774 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1775# endif
1776 pVCpu->iem.s.offOpcode = offOpcode + 8;
1777 }
1778 else
1779 *pu64 = 0;
1780 return rcStrict;
1781}
1782
1783#else /* IEM_WITH_SETJMP */
1784
1785/**
1786 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1787 *
1788 * @returns The opcode qword.
1789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1790 */
1791uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1792{
1793# ifdef IEM_WITH_CODE_TLB
1794 uint64_t u64;
1795 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1796 return u64;
1797# else
1798 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1799 if (rcStrict == VINF_SUCCESS)
1800 {
1801 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1802 pVCpu->iem.s.offOpcode = offOpcode + 8;
1803# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1804 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1805# else
1806 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1807 pVCpu->iem.s.abOpcode[offOpcode + 1],
1808 pVCpu->iem.s.abOpcode[offOpcode + 2],
1809 pVCpu->iem.s.abOpcode[offOpcode + 3],
1810 pVCpu->iem.s.abOpcode[offOpcode + 4],
1811 pVCpu->iem.s.abOpcode[offOpcode + 5],
1812 pVCpu->iem.s.abOpcode[offOpcode + 6],
1813 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1814# endif
1815 }
1816 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1817# endif
1818}
1819
1820#endif /* IEM_WITH_SETJMP */
1821
1822
1823
1824/** @name Misc Worker Functions.
1825 * @{
1826 */
1827
1828/**
1829 * Gets the exception class for the specified exception vector.
1830 *
1831 * @returns The class of the specified exception.
1832 * @param uVector The exception vector.
1833 */
1834static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1835{
1836 Assert(uVector <= X86_XCPT_LAST);
1837 switch (uVector)
1838 {
1839 case X86_XCPT_DE:
1840 case X86_XCPT_TS:
1841 case X86_XCPT_NP:
1842 case X86_XCPT_SS:
1843 case X86_XCPT_GP:
1844 case X86_XCPT_SX: /* AMD only */
1845 return IEMXCPTCLASS_CONTRIBUTORY;
1846
1847 case X86_XCPT_PF:
1848 case X86_XCPT_VE: /* Intel only */
1849 return IEMXCPTCLASS_PAGE_FAULT;
1850
1851 case X86_XCPT_DF:
1852 return IEMXCPTCLASS_DOUBLE_FAULT;
1853 }
1854 return IEMXCPTCLASS_BENIGN;
1855}
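/* Note: this mirrors the benign/contributory/page-fault classification used by the Intel SDM
   table "Conditions for Generating a Double Fault": e.g. a #GP raised while delivering another
   #GP escalates to #DF, whereas a benign exception such as #DB is simply delivered. */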
1856
1857
1858/**
1859 * Evaluates how to handle an exception caused during delivery of another event
1860 * (exception / interrupt).
1861 *
1862 * @returns How to handle the recursive exception.
1863 * @param pVCpu The cross context virtual CPU structure of the
1864 * calling thread.
1865 * @param fPrevFlags The flags of the previous event.
1866 * @param uPrevVector The vector of the previous event.
1867 * @param fCurFlags The flags of the current exception.
1868 * @param uCurVector The vector of the current exception.
1869 * @param pfXcptRaiseInfo Where to store additional information about the
1870 * exception condition. Optional.
1871 */
1872VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1873 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1874{
1875 /*
1876 * Only CPU exceptions can be raised while delivering other events; software interrupt
1877 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1878 */
1879 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1880 Assert(pVCpu); RT_NOREF(pVCpu);
1881 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1882
1883 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1884 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1885 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1886 {
1887 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1888 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1889 {
1890 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1891 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1892 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1893 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1894 {
1895 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1896 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1897 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1898 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1899 uCurVector, pVCpu->cpum.GstCtx.cr2));
1900 }
1901 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1902 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1903 {
1904 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1905 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1906 }
1907 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1908 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1909 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1910 {
1911 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1912 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1913 }
1914 }
1915 else
1916 {
1917 if (uPrevVector == X86_XCPT_NMI)
1918 {
1919 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1920 if (uCurVector == X86_XCPT_PF)
1921 {
1922 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1923 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1924 }
1925 }
1926 else if ( uPrevVector == X86_XCPT_AC
1927 && uCurVector == X86_XCPT_AC)
1928 {
1929 enmRaise = IEMXCPTRAISE_CPU_HANG;
1930 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1931 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1932 }
1933 }
1934 }
1935 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1936 {
1937 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1938 if (uCurVector == X86_XCPT_PF)
1939 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1940 }
1941 else
1942 {
1943 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1944 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1945 }
1946
1947 if (pfXcptRaiseInfo)
1948 *pfXcptRaiseInfo = fRaiseInfo;
1949 return enmRaise;
1950}
1951
1952
1953/**
1954 * Enters the CPU shutdown state initiated by a triple fault or other
1955 * unrecoverable conditions.
1956 *
1957 * @returns Strict VBox status code.
1958 * @param pVCpu The cross context virtual CPU structure of the
1959 * calling thread.
1960 */
1961static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1962{
1963 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1964 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1965
1966 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1967 {
1968 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1969 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1970 }
1971
1972 RT_NOREF(pVCpu);
1973 return VINF_EM_TRIPLE_FAULT;
1974}
1975
1976
1977/**
1978 * Validates a new SS segment.
1979 *
1980 * @returns VBox strict status code.
1981 * @param pVCpu The cross context virtual CPU structure of the
1982 * calling thread.
1983 * @param NewSS The new SS selector.
1984 * @param uCpl The CPL to load the stack for.
1985 * @param pDesc Where to return the descriptor.
1986 */
1987static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1988{
1989 /* Null selectors are not allowed (we're not called for dispatching
1990 interrupts with SS=0 in long mode). */
1991 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1992 {
1993 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1994 return iemRaiseTaskSwitchFault0(pVCpu);
1995 }
1996
1997 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1998 if ((NewSS & X86_SEL_RPL) != uCpl)
1999 {
2000 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2001 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2002 }
2003
2004 /*
2005 * Read the descriptor.
2006 */
2007 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2008 if (rcStrict != VINF_SUCCESS)
2009 return rcStrict;
2010
2011 /*
2012 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2013 */
2014 if (!pDesc->Legacy.Gen.u1DescType)
2015 {
2016 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2017 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2018 }
2019
2020 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2021 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2022 {
2023 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2024 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2025 }
2026 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2027 {
2028 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2029 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2030 }
2031
2032 /* Is it there? */
2033 /** @todo testcase: Is this checked before the canonical / limit check below? */
2034 if (!pDesc->Legacy.Gen.u1Present)
2035 {
2036 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2037 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2038 }
2039
2040 return VINF_SUCCESS;
2041}
2042
2043/** @} */
2044
2045
2046/** @name Raising Exceptions.
2047 *
2048 * @{
2049 */
2050
2051
2052/**
2053 * Loads the specified stack far pointer from the TSS.
2054 *
2055 * @returns VBox strict status code.
2056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2057 * @param uCpl The CPL to load the stack for.
2058 * @param pSelSS Where to return the new stack segment.
2059 * @param puEsp Where to return the new stack pointer.
2060 */
2061static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2062{
2063 VBOXSTRICTRC rcStrict;
2064 Assert(uCpl < 4);
2065
2066 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2067 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2068 {
2069 /*
2070 * 16-bit TSS (X86TSS16).
2071 */
2072 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2073 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2074 {
2075 uint32_t off = uCpl * 4 + 2;
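            /* In a 16-bit TSS the sp0/ss0, sp1/ss1 and sp2/ss2 pairs start at offset 2 and take
               4 bytes each, so this addresses the SP:SS pair for the given CPL (SP in the low
               word and SS in the high word of the 32-bit value read below). */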
2076 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2077 {
2078 /** @todo check actual access pattern here. */
2079 uint32_t u32Tmp = 0; /* gcc maybe... */
2080 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2081 if (rcStrict == VINF_SUCCESS)
2082 {
2083 *puEsp = RT_LOWORD(u32Tmp);
2084 *pSelSS = RT_HIWORD(u32Tmp);
2085 return VINF_SUCCESS;
2086 }
2087 }
2088 else
2089 {
2090 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2091 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2092 }
2093 break;
2094 }
2095
2096 /*
2097 * 32-bit TSS (X86TSS32).
2098 */
2099 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2100 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2101 {
2102 uint32_t off = uCpl * 8 + 4;
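            /* In a 32-bit TSS esp0 is at offset 4 and ss0 at offset 8, with 8 bytes per CPL, so
               this addresses the ESP:SS pair for the given CPL (ESP in the low dword and SS in
               the low word of the high dword of the 64-bit value read below). */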
2103 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2104 {
2105/** @todo check actual access pattern here. */
2106 uint64_t u64Tmp;
2107 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2108 if (rcStrict == VINF_SUCCESS)
2109 {
2110 *puEsp = u64Tmp & UINT32_MAX;
2111 *pSelSS = (RTSEL)(u64Tmp >> 32);
2112 return VINF_SUCCESS;
2113 }
2114 }
2115 else
2116 {
2117 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2118 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2119 }
2120 break;
2121 }
2122
2123 default:
2124 AssertFailed();
2125 rcStrict = VERR_IEM_IPE_4;
2126 break;
2127 }
2128
2129 *puEsp = 0; /* make gcc happy */
2130 *pSelSS = 0; /* make gcc happy */
2131 return rcStrict;
2132}
2133
2134
2135/**
2136 * Loads the specified stack pointer from the 64-bit TSS.
2137 *
2138 * @returns VBox strict status code.
2139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2140 * @param uCpl The CPL to load the stack for.
2141 * @param uIst The interrupt stack table index; 0 means use the stack for uCpl.
2142 * @param puRsp Where to return the new stack pointer.
2143 */
2144static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2145{
2146 Assert(uCpl < 4);
2147 Assert(uIst < 8);
2148 *puRsp = 0; /* make gcc happy */
2149
2150 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2151 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2152
2153 uint32_t off;
2154 if (uIst)
2155 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2156 else
2157 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
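    /* The 64-bit TSS starts with a reserved dword followed by rsp0..rsp2; ist1..ist7 follow
       after another reserved qword. uIst == 0 selects the rsp field for the target CPL,
       otherwise the given IST slot is used. */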
2158 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2159 {
2160 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2161 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2162 }
2163
2164 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2165}
2166
2167
2168/**
2169 * Adjust the CPU state according to the exception being raised.
2170 *
2171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2172 * @param u8Vector The exception that has been raised.
2173 */
2174DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2175{
2176 switch (u8Vector)
2177 {
2178 case X86_XCPT_DB:
2179 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2180 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2181 break;
2182 /** @todo Read the AMD and Intel exception reference... */
2183 }
2184}
2185
2186
2187/**
2188 * Implements exceptions and interrupts for real mode.
2189 *
2190 * @returns VBox strict status code.
2191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2192 * @param cbInstr The number of bytes to offset rIP by in the return
2193 * address.
2194 * @param u8Vector The interrupt / exception vector number.
2195 * @param fFlags The flags.
2196 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2197 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2198 */
2199static VBOXSTRICTRC
2200iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2201 uint8_t cbInstr,
2202 uint8_t u8Vector,
2203 uint32_t fFlags,
2204 uint16_t uErr,
2205 uint64_t uCr2) RT_NOEXCEPT
2206{
2207 NOREF(uErr); NOREF(uCr2);
2208 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2209
2210 /*
2211 * Read the IDT entry.
2212 */
2213 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2214 {
2215 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2216 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2217 }
2218 RTFAR16 Idte;
2219 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2220 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2221 {
2222 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2223 return rcStrict;
2224 }
2225
2226#ifdef LOG_ENABLED
2227 /* If software interrupt, try decode it if logging is enabled and such. */
2228 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2229 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2230 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2231#endif
2232
2233 /*
2234 * Push the stack frame.
2235 */
2236 uint8_t bUnmapInfo;
2237 uint16_t *pu16Frame;
2238 uint64_t uNewRsp;
2239 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2240 if (rcStrict != VINF_SUCCESS)
2241 return rcStrict;
2242
2243 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2244#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2245 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2246 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2247 fEfl |= UINT16_C(0xf000);
2248#endif
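    /* Note: the 8086/80186/V20 always store FLAGS with bits 12..15 set; guest code commonly
       uses the pushed value to tell those CPUs apart from a 286 or later. */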
2249 pu16Frame[2] = (uint16_t)fEfl;
2250 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2251 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
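    /* The resulting frame at the new SS:SP is IP at +0, CS at +2 and FLAGS at +4; for software
       interrupts the saved IP points past the INTn instruction (the cbInstr adjustment above). */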
2252 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2253 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2254 return rcStrict;
2255
2256 /*
2257 * Load the vector address into cs:ip and make exception specific state
2258 * adjustments.
2259 */
2260 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2261 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2262 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2263 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2264 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2265 pVCpu->cpum.GstCtx.rip = Idte.off;
2266 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2267 IEMMISC_SET_EFL(pVCpu, fEfl);
2268
2269 /** @todo do we actually do this in real mode? */
2270 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2271 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2272
2273 /*
2274 * Deal with debug events that follow the exception and clear inhibit flags.
2275 */
2276 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2277 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2278 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2279 else
2280 {
2281 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2282 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2283 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2284 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2285 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2286 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2287 return iemRaiseDebugException(pVCpu);
2288 }
2289
2290 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2291 so best leave them alone in case we're in a weird kind of real mode... */
2292
2293 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2294}
2295
2296
2297/**
2298 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2299 *
2300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2301 * @param pSReg Pointer to the segment register.
2302 */
2303DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2304{
2305 pSReg->Sel = 0;
2306 pSReg->ValidSel = 0;
2307 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2308 {
2309 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes: */
2310 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2311 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2312 }
2313 else
2314 {
2315 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2316 /** @todo check this on AMD-V */
2317 pSReg->u64Base = 0;
2318 pSReg->u32Limit = 0;
2319 }
2320}
2321
2322
2323/**
2324 * Loads a segment selector during a task switch in V8086 mode.
2325 *
2326 * @param pSReg Pointer to the segment register.
2327 * @param uSel The selector value to load.
2328 */
2329DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2330{
2331 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2332 pSReg->Sel = uSel;
2333 pSReg->ValidSel = uSel;
2334 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2335 pSReg->u64Base = uSel << 4;
2336 pSReg->u32Limit = 0xffff;
2337 pSReg->Attr.u = 0xf3;
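    /* 0xf3 = present, DPL=3, non-system (S=1), read/write accessed data segment - the fixed
       segment attributes in virtual-8086 mode. */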
2338}
2339
2340
2341/**
2342 * Loads a segment selector during a task switch in protected mode.
2343 *
2344 * In this task switch scenario, we would throw \#TS exceptions rather than
2345 * \#GPs.
2346 *
2347 * @returns VBox strict status code.
2348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2349 * @param pSReg Pointer to the segment register.
2350 * @param uSel The new selector value.
2351 *
2352 * @remarks This does _not_ handle CS or SS.
2353 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2354 */
2355static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2356{
2357 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2358
2359 /* Null data selector. */
2360 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2361 {
2362 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2364 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2365 return VINF_SUCCESS;
2366 }
2367
2368 /* Fetch the descriptor. */
2369 IEMSELDESC Desc;
2370 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2371 if (rcStrict != VINF_SUCCESS)
2372 {
2373 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2374 VBOXSTRICTRC_VAL(rcStrict)));
2375 return rcStrict;
2376 }
2377
2378 /* Must be a data segment or readable code segment. */
2379 if ( !Desc.Legacy.Gen.u1DescType
2380 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2381 {
2382 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2383 Desc.Legacy.Gen.u4Type));
2384 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2385 }
2386
2387 /* Check privileges for data segments and non-conforming code segments. */
2388 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2389 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2390 {
2391 /* The RPL and the new CPL must be less than or equal to the DPL. */
2392 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2393 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2394 {
2395 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2396 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2397 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2398 }
2399 }
2400
2401 /* Is it there? */
2402 if (!Desc.Legacy.Gen.u1Present)
2403 {
2404 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2405 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2406 }
2407
2408 /* The base and limit. */
2409 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2410 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2411
2412 /*
2413 * Ok, everything checked out fine. Now set the accessed bit before
2414 * committing the result into the registers.
2415 */
2416 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2417 {
2418 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2419 if (rcStrict != VINF_SUCCESS)
2420 return rcStrict;
2421 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2422 }
2423
2424 /* Commit */
2425 pSReg->Sel = uSel;
2426 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2427 pSReg->u32Limit = cbLimit;
2428 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2429 pSReg->ValidSel = uSel;
2430 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2431 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2432 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2433
2434 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2435 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2436 return VINF_SUCCESS;
2437}
2438
2439
2440/**
2441 * Performs a task switch.
2442 *
2443 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2444 * caller is responsible for performing the necessary checks (like DPL, TSS
2445 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2446 * reference for JMP, CALL, IRET.
2447 *
2448 * If the task switch is due to a software interrupt or hardware exception,
2449 * the caller is responsible for validating the TSS selector and descriptor. See
2450 * Intel Instruction reference for INT n.
2451 *
2452 * @returns VBox strict status code.
2453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2454 * @param enmTaskSwitch The cause of the task switch.
2455 * @param uNextEip The EIP effective after the task switch.
2456 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2457 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2458 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2459 * @param SelTss The TSS selector of the new task.
2460 * @param pNewDescTss Pointer to the new TSS descriptor.
2461 */
2462VBOXSTRICTRC
2463iemTaskSwitch(PVMCPUCC pVCpu,
2464 IEMTASKSWITCH enmTaskSwitch,
2465 uint32_t uNextEip,
2466 uint32_t fFlags,
2467 uint16_t uErr,
2468 uint64_t uCr2,
2469 RTSEL SelTss,
2470 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2471{
2472 Assert(!IEM_IS_REAL_MODE(pVCpu));
2473 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2474 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2475
2476 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2477 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2478 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2479 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2480 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2481
2482 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2483 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2484
2485 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2486 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2487
2488 /* Update CR2 in case it's a page-fault. */
2489 /** @todo This should probably be done much earlier in IEM/PGM. See
2490 * @bugref{5653#c49}. */
2491 if (fFlags & IEM_XCPT_FLAGS_CR2)
2492 pVCpu->cpum.GstCtx.cr2 = uCr2;
2493
2494 /*
2495 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2496 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2497 */
2498 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2499 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
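    /* These are the architectural minimums: a 16-bit TSS must cover at least 44 bytes
       (limit 0x2b) and a 32-bit TSS at least 104 bytes (limit 0x67). */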
2500 if (uNewTssLimit < uNewTssLimitMin)
2501 {
2502 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2503 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2504 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2505 }
2506
2507 /*
2508 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2509 * The new TSS must have been read and validated (DPL, limits etc.) before a
2510 * task-switch VM-exit commences.
2511 *
2512 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2513 */
2514 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2515 {
2516 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2517 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2518 }
2519
2520 /*
2521 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2522 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2523 */
2524 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2525 {
2526 uint64_t const uExitInfo1 = SelTss;
2527 uint64_t uExitInfo2 = uErr;
2528 switch (enmTaskSwitch)
2529 {
2530 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2531 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2532 default: break;
2533 }
2534 if (fFlags & IEM_XCPT_FLAGS_ERR)
2535 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2536 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2537 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2538
2539 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2540 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2541 RT_NOREF2(uExitInfo1, uExitInfo2);
2542 }
2543
2544 /*
2545 * Check the current TSS limit. The last data written to the current TSS during the
2546 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2547 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2548 *
2549 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2550 * end up with smaller than "legal" TSS limits.
2551 */
2552 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2553 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2554 if (uCurTssLimit < uCurTssLimitMin)
2555 {
2556 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2557 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2558 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2559 }
2560
2561 /*
2562 * Verify that the new TSS can be accessed and map it. Map only the required contents
2563 * and not the entire TSS.
2564 */
2565 uint8_t bUnmapInfoNewTss;
2566 void *pvNewTss;
2567 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2568 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2569 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2570 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2571 * not perform correct translation if this happens. See Intel spec. 7.2.1
2572 * "Task-State Segment". */
2573 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2574/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2575 * Consider wrapping the remainder into a function for simpler cleanup. */
2576 if (rcStrict != VINF_SUCCESS)
2577 {
2578 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2579 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2580 return rcStrict;
2581 }
2582
2583 /*
2584 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2585 */
2586 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2587 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2588 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2589 {
2590 uint8_t bUnmapInfoDescCurTss;
2591 PX86DESC pDescCurTss;
2592 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2593 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2594 if (rcStrict != VINF_SUCCESS)
2595 {
2596 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2597 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2598 return rcStrict;
2599 }
2600
2601 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2602 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2603 if (rcStrict != VINF_SUCCESS)
2604 {
2605 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2606 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2607 return rcStrict;
2608 }
2609
2610 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2611 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2612 {
2613 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2614 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2615 fEFlags &= ~X86_EFL_NT;
2616 }
2617 }
2618
2619 /*
2620 * Save the CPU state into the current TSS.
2621 */
2622 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2623 if (GCPtrNewTss == GCPtrCurTss)
2624 {
2625 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2626 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2627 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2628 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2629 pVCpu->cpum.GstCtx.ldtr.Sel));
2630 }
2631 if (fIsNewTss386)
2632 {
2633 /*
2634 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2635 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2636 */
2637 uint8_t bUnmapInfoCurTss32;
2638 void *pvCurTss32;
2639 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2640 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2641 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
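        /* The mapped window is the dynamic register image of the 32-bit TSS: eip (offset 0x20)
           up to, but not including, selLdt (offset 0x60), i.e. 64 bytes. */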
2642 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2643 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2644 if (rcStrict != VINF_SUCCESS)
2645 {
2646 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2647 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2648 return rcStrict;
2649 }
2650
2651 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
2652 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2653 pCurTss32->eip = uNextEip;
2654 pCurTss32->eflags = fEFlags;
2655 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2656 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2657 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2658 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2659 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2660 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2661 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2662 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2663 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2664 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2665 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2666 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2667 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2668 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2669
2670 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2671 if (rcStrict != VINF_SUCCESS)
2672 {
2673 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2674 VBOXSTRICTRC_VAL(rcStrict)));
2675 return rcStrict;
2676 }
2677 }
2678 else
2679 {
2680 /*
2681 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2682 */
2683 uint8_t bUnmapInfoCurTss16;
2684 void *pvCurTss16;
2685 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2686 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2687 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
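        /* Likewise for the 16-bit TSS: ip (offset 0x0e) up to, but not including, selLdt
           (offset 0x2a), i.e. 28 bytes. */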
2688 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2689 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2690 if (rcStrict != VINF_SUCCESS)
2691 {
2692 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2693 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2694 return rcStrict;
2695 }
2696
2697 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
2698 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2699 pCurTss16->ip = uNextEip;
2700 pCurTss16->flags = (uint16_t)fEFlags;
2701 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2702 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2703 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2704 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2705 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2706 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2707 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2708 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2709 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2710 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2711 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2712 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2713
2714 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2715 if (rcStrict != VINF_SUCCESS)
2716 {
2717 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2718 VBOXSTRICTRC_VAL(rcStrict)));
2719 return rcStrict;
2720 }
2721 }
2722
2723 /*
2724 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2725 */
2726 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2727 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2728 {
2729 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2730 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2731 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2732 }
2733
2734 /*
2735 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2736 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2737 */
2738 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2739 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2740 bool fNewDebugTrap;
2741 if (fIsNewTss386)
2742 {
2743 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2744 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2745 uNewEip = pNewTss32->eip;
2746 uNewEflags = pNewTss32->eflags;
2747 uNewEax = pNewTss32->eax;
2748 uNewEcx = pNewTss32->ecx;
2749 uNewEdx = pNewTss32->edx;
2750 uNewEbx = pNewTss32->ebx;
2751 uNewEsp = pNewTss32->esp;
2752 uNewEbp = pNewTss32->ebp;
2753 uNewEsi = pNewTss32->esi;
2754 uNewEdi = pNewTss32->edi;
2755 uNewES = pNewTss32->es;
2756 uNewCS = pNewTss32->cs;
2757 uNewSS = pNewTss32->ss;
2758 uNewDS = pNewTss32->ds;
2759 uNewFS = pNewTss32->fs;
2760 uNewGS = pNewTss32->gs;
2761 uNewLdt = pNewTss32->selLdt;
2762 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2763 }
2764 else
2765 {
2766 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2767 uNewCr3 = 0;
2768 uNewEip = pNewTss16->ip;
2769 uNewEflags = pNewTss16->flags;
2770 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2771 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2772 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2773 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2774 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2775 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2776 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2777 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2778 uNewES = pNewTss16->es;
2779 uNewCS = pNewTss16->cs;
2780 uNewSS = pNewTss16->ss;
2781 uNewDS = pNewTss16->ds;
2782 uNewFS = 0;
2783 uNewGS = 0;
2784 uNewLdt = pNewTss16->selLdt;
2785 fNewDebugTrap = false;
2786 }
2787
2788 if (GCPtrNewTss == GCPtrCurTss)
2789 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2790 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2791
2792 /*
2793 * We're done accessing the new TSS.
2794 */
2795 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2796 if (rcStrict != VINF_SUCCESS)
2797 {
2798 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2799 return rcStrict;
2800 }
2801
2802 /*
2803 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2804 */
2805 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2806 {
2807 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2808 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2809 if (rcStrict != VINF_SUCCESS)
2810 {
2811 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2812 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2813 return rcStrict;
2814 }
2815
2816 /* Check that the descriptor indicates the new TSS is available (not busy). */
2817 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2818 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2819 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2820
2821 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2822 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2823 if (rcStrict != VINF_SUCCESS)
2824 {
2825 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2826 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2827 return rcStrict;
2828 }
2829 }
2830
2831 /*
2832 * From this point on, we're technically in the new task. Exceptions raised from here on
2833 * are considered to occur after the task switch has completed but before any instruction in the new task executes.
2834 */
2835 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2836 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2837 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2838 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2839 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2840 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2841 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2842
2843 /* Set the busy bit in TR. */
2844 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2845
2846 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2847 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2848 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2849 {
2850 uNewEflags |= X86_EFL_NT;
2851 }
2852
2853 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2854 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2855 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2856
2857 pVCpu->cpum.GstCtx.eip = uNewEip;
2858 pVCpu->cpum.GstCtx.eax = uNewEax;
2859 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2860 pVCpu->cpum.GstCtx.edx = uNewEdx;
2861 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2862 pVCpu->cpum.GstCtx.esp = uNewEsp;
2863 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2864 pVCpu->cpum.GstCtx.esi = uNewEsi;
2865 pVCpu->cpum.GstCtx.edi = uNewEdi;
2866
2867 uNewEflags &= X86_EFL_LIVE_MASK;
2868 uNewEflags |= X86_EFL_RA1_MASK;
2869 IEMMISC_SET_EFL(pVCpu, uNewEflags);
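    /* Note: X86_EFL_LIVE_MASK strips the reserved EFLAGS bits and X86_EFL_RA1_MASK forces
       bit 1, which architecturally always reads as one. */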
2870
2871 /*
2872 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2873 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2874 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2875 */
2876 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2877 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2878
2879 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2880 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2881
2882 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2883 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2884
2885 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2886 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2887
2888 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2889 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2890
2891 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2892 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2893 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2894
2895 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2896 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2897 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2898 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2899
2900 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2901 {
2902 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2903 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2904 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2905 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2906 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2907 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2908 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2909 }
2910
2911 /*
2912 * Switch CR3 for the new task.
2913 */
2914 if ( fIsNewTss386
2915 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2916 {
2917 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2918 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2919 AssertRCSuccessReturn(rc, rc);
2920
2921 /* Inform PGM. */
2922 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2923 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2924 AssertRCReturn(rc, rc);
2925 /* ignore informational status codes */
2926
2927 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2928 }
2929
2930 /*
2931 * Switch LDTR for the new task.
2932 */
2933 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2934 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2935 else
2936 {
2937 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2938
2939 IEMSELDESC DescNewLdt;
2940 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2941 if (rcStrict != VINF_SUCCESS)
2942 {
2943 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2944 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2945 return rcStrict;
2946 }
2947 if ( !DescNewLdt.Legacy.Gen.u1Present
2948 || DescNewLdt.Legacy.Gen.u1DescType
2949 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2950 {
2951 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2952 uNewLdt, DescNewLdt.Legacy.u));
2953 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2954 }
2955
2956 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2957 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2958 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2959 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2960 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2961 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2962 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2963 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2964 }
2965
2966 IEMSELDESC DescSS;
2967 if (IEM_IS_V86_MODE(pVCpu))
2968 {
2969 IEM_SET_CPL(pVCpu, 3);
2970 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2971 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2972 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2973 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2974 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2975 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2976
2977 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2978 DescSS.Legacy.u = 0;
2979 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2980 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2981 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2982 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2983 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2984 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2985 DescSS.Legacy.Gen.u2Dpl = 3;
2986 }
2987 else
2988 {
2989 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2990
2991 /*
2992 * Load the stack segment for the new task.
2993 */
2994 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2995 {
2996 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2997 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2998 }
2999
3000 /* Fetch the descriptor. */
3001 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3002 if (rcStrict != VINF_SUCCESS)
3003 {
3004 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3005 VBOXSTRICTRC_VAL(rcStrict)));
3006 return rcStrict;
3007 }
3008
3009 /* SS must be a data segment and writable. */
3010 if ( !DescSS.Legacy.Gen.u1DescType
3011 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3012 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3013 {
3014 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3015 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3016 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3017 }
3018
3019 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3020 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3021 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3022 {
3023 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3024 uNewCpl));
3025 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3026 }
3027
3028 /* Is it there? */
3029 if (!DescSS.Legacy.Gen.u1Present)
3030 {
3031 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3032 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3033 }
3034
3035 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3036 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3037
3038 /* Set the accessed bit before committing the result into SS. */
3039 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3040 {
3041 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3042 if (rcStrict != VINF_SUCCESS)
3043 return rcStrict;
3044 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3045 }
3046
3047 /* Commit SS. */
3048 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3049 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3050 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3051 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3052 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3053 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3054 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3055
3056 /* CPL has changed, update IEM before loading rest of segments. */
3057 IEM_SET_CPL(pVCpu, uNewCpl);
3058
3059 /*
3060 * Load the data segments for the new task.
3061 */
3062 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3063 if (rcStrict != VINF_SUCCESS)
3064 return rcStrict;
3065 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3066 if (rcStrict != VINF_SUCCESS)
3067 return rcStrict;
3068 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3069 if (rcStrict != VINF_SUCCESS)
3070 return rcStrict;
3071 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3072 if (rcStrict != VINF_SUCCESS)
3073 return rcStrict;
3074
3075 /*
3076 * Load the code segment for the new task.
3077 */
3078 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3079 {
3080 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3081 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3082 }
3083
3084 /* Fetch the descriptor. */
3085 IEMSELDESC DescCS;
3086 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3087 if (rcStrict != VINF_SUCCESS)
3088 {
3089 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3090 return rcStrict;
3091 }
3092
3093 /* CS must be a code segment. */
3094 if ( !DescCS.Legacy.Gen.u1DescType
3095 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3096 {
3097 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3098 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3099 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3100 }
3101
3102 /* For conforming CS, DPL must be less than or equal to the RPL. */
3103 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3104 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3105 {
3106            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3107 DescCS.Legacy.Gen.u2Dpl));
3108 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3109 }
3110
3111 /* For non-conforming CS, DPL must match RPL. */
3112 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3113 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3114 {
3115            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3116 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3117 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3118 }
3119
3120 /* Is it there? */
3121 if (!DescCS.Legacy.Gen.u1Present)
3122 {
3123 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3124 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3125 }
3126
3127 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3128 u64Base = X86DESC_BASE(&DescCS.Legacy);
3129
3130 /* Set the accessed bit before committing the result into CS. */
3131 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3132 {
3133 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3134 if (rcStrict != VINF_SUCCESS)
3135 return rcStrict;
3136 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3137 }
3138
3139 /* Commit CS. */
3140 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3141 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3142 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3143 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3144 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3145 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3146 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3147 }
3148
3149 /* Make sure the CPU mode is correct. */
3150 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3151 if (fExecNew != pVCpu->iem.s.fExec)
3152 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3153 pVCpu->iem.s.fExec = fExecNew;
3154
3155 /** @todo Debug trap. */
3156 if (fIsNewTss386 && fNewDebugTrap)
3157 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3158
3159 /*
3160 * Construct the error code masks based on what caused this task switch.
3161 * See Intel Instruction reference for INT.
3162 */
3163 uint16_t uExt;
3164 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3165 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3166 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3167 uExt = 1;
3168 else
3169 uExt = 0;
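    /* uExt becomes bit 0 (the EXT flag) of the error code for any fault raised below. */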
3170
3171 /*
3172 * Push any error code on to the new stack.
3173 */
3174 if (fFlags & IEM_XCPT_FLAGS_ERR)
3175 {
3176 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3177 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3178 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
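        /* A 32-bit TSS gets a 32-bit error code pushed, a 16-bit TSS a 16-bit one (see the pushes below). */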
3179
3180 /* Check that there is sufficient space on the stack. */
3181 /** @todo Factor out segment limit checking for normal/expand down segments
3182 * into a separate function. */
3183 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3184 {
3185 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3186 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3187 {
3188 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3189 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3190 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3191 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3192 }
3193 }
3194 else
3195 {
3196 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3197 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3198 {
3199 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3200 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3201 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3202 }
3203 }
3204
3205
3206 if (fIsNewTss386)
3207 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3208 else
3209 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3210 if (rcStrict != VINF_SUCCESS)
3211 {
3212 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3213 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3214 return rcStrict;
3215 }
3216 }
3217
3218 /* Check the new EIP against the new CS limit. */
3219 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3220 {
3221        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3222 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3223 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3224 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3225 }
3226
3227 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3228 pVCpu->cpum.GstCtx.ss.Sel));
3229 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3230}
3231
3232
3233/**
3234 * Implements exceptions and interrupts for protected mode.
3235 *
3236 * @returns VBox strict status code.
3237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3238 * @param cbInstr The number of bytes to offset rIP by in the return
3239 * address.
3240 * @param u8Vector The interrupt / exception vector number.
3241 * @param fFlags The flags.
3242 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3243 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3244 */
3245static VBOXSTRICTRC
3246iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3247 uint8_t cbInstr,
3248 uint8_t u8Vector,
3249 uint32_t fFlags,
3250 uint16_t uErr,
3251 uint64_t uCr2) RT_NOEXCEPT
3252{
3253 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3254
3255 /*
3256 * Read the IDT entry.
3257 */
3258 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3259 {
3260 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3261 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3262 }
3263 X86DESC Idte;
3264 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3265 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3266 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3267 {
3268 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3269 return rcStrict;
3270 }
3271 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3272 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3273 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3274 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3275
3276 /*
3277 * Check the descriptor type, DPL and such.
3278 * ASSUMES this is done in the same order as described for call-gate calls.
3279 */
3280 if (Idte.Gate.u1DescType)
3281 {
3282 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3283 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3284 }
3285 bool fTaskGate = false;
3286 uint8_t f32BitGate = true;
3287 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
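    /* TF, NT, RF and VM are always cleared from EFLAGS on delivery; the gate type below decides whether IF is cleared as well. */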
3288 switch (Idte.Gate.u4Type)
3289 {
3290 case X86_SEL_TYPE_SYS_UNDEFINED:
3291 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3292 case X86_SEL_TYPE_SYS_LDT:
3293 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3294 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3295 case X86_SEL_TYPE_SYS_UNDEFINED2:
3296 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3297 case X86_SEL_TYPE_SYS_UNDEFINED3:
3298 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3299 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3300 case X86_SEL_TYPE_SYS_UNDEFINED4:
3301 {
3302 /** @todo check what actually happens when the type is wrong...
3303 * esp. call gates. */
3304 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3305 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3306 }
3307
3308 case X86_SEL_TYPE_SYS_286_INT_GATE:
3309 f32BitGate = false;
3310 RT_FALL_THRU();
3311 case X86_SEL_TYPE_SYS_386_INT_GATE:
3312 fEflToClear |= X86_EFL_IF;
3313 break;
3314
3315 case X86_SEL_TYPE_SYS_TASK_GATE:
3316 fTaskGate = true;
3317#ifndef IEM_IMPLEMENTS_TASKSWITCH
3318 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3319#endif
3320 break;
3321
3322 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3323 f32BitGate = false;
3324 break;
3325 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3326 break;
3327
3328 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3329 }
3330
3331 /* Check DPL against CPL if applicable. */
3332 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3333 {
3334 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3335 {
3336 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3337 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3338 }
3339 }
3340
3341 /* Is it there? */
3342 if (!Idte.Gate.u1Present)
3343 {
3344 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3345 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3346 }
3347
3348 /* Is it a task-gate? */
3349 if (fTaskGate)
3350 {
3351 /*
3352 * Construct the error code masks based on what caused this task switch.
3353 * See Intel Instruction reference for INT.
3354 */
3355 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3356 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3357 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3358 RTSEL SelTss = Idte.Gate.u16Sel;
3359
3360 /*
3361 * Fetch the TSS descriptor in the GDT.
3362 */
3363 IEMSELDESC DescTSS;
3364 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3365 if (rcStrict != VINF_SUCCESS)
3366 {
3367 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3368 VBOXSTRICTRC_VAL(rcStrict)));
3369 return rcStrict;
3370 }
3371
3372 /* The TSS descriptor must be a system segment and be available (not busy). */
3373 if ( DescTSS.Legacy.Gen.u1DescType
3374 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3375 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3376 {
3377 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3378 u8Vector, SelTss, DescTSS.Legacy.au64));
3379 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3380 }
3381
3382 /* The TSS must be present. */
3383 if (!DescTSS.Legacy.Gen.u1Present)
3384 {
3385 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3386 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3387 }
3388
3389 /* Do the actual task switch. */
3390 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3391 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3392 fFlags, uErr, uCr2, SelTss, &DescTSS);
3393 }
3394
3395 /* A null CS is bad. */
3396 RTSEL NewCS = Idte.Gate.u16Sel;
3397 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3398 {
3399 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3400 return iemRaiseGeneralProtectionFault0(pVCpu);
3401 }
3402
3403 /* Fetch the descriptor for the new CS. */
3404 IEMSELDESC DescCS;
3405 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3406 if (rcStrict != VINF_SUCCESS)
3407 {
3408 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3409 return rcStrict;
3410 }
3411
3412 /* Must be a code segment. */
3413 if (!DescCS.Legacy.Gen.u1DescType)
3414 {
3415 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3416 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3417 }
3418 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3419 {
3420 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3421 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3422 }
3423
3424 /* Don't allow lowering the privilege level. */
3425 /** @todo Does the lowering of privileges apply to software interrupts
3426 * only? This has bearings on the more-privileged or
3427 * same-privilege stack behavior further down. A testcase would
3428 * be nice. */
3429 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3430 {
3431 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3432 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3433 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3434 }
3435
3436 /* Make sure the selector is present. */
3437 if (!DescCS.Legacy.Gen.u1Present)
3438 {
3439 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3440 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3441 }
3442
3443#ifdef LOG_ENABLED
3444 /* If software interrupt, try decode it if logging is enabled and such. */
3445 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3446 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3447 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3448#endif
3449
3450 /* Check the new EIP against the new CS limit. */
3451 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3452 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3453 ? Idte.Gate.u16OffsetLow
3454 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3455 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3456 if (uNewEip > cbLimitCS)
3457 {
3458 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3459 u8Vector, uNewEip, cbLimitCS, NewCS));
3460 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3461 }
3462 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3463
3464 /* Calc the flag image to push. */
3465 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3466 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3467 fEfl &= ~X86_EFL_RF;
3468 else
3469 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3470
3471 /* From V8086 mode only go to CPL 0. */
3472 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3473 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
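    /* A conforming handler CS keeps the current CPL; a non-conforming one runs at its CS.DPL. */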
3474 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3475 {
3476 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3477 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3478 }
3479
3480 /*
3481 * If the privilege level changes, we need to get a new stack from the TSS.
3482 * This in turns means validating the new SS and ESP...
3483 */
3484 if (uNewCpl != IEM_GET_CPL(pVCpu))
3485 {
3486 RTSEL NewSS;
3487 uint32_t uNewEsp;
3488 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3489 if (rcStrict != VINF_SUCCESS)
3490 return rcStrict;
3491
3492 IEMSELDESC DescSS;
3493 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3494 if (rcStrict != VINF_SUCCESS)
3495 return rcStrict;
3496 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3497 if (!DescSS.Legacy.Gen.u1DefBig)
3498 {
3499 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3500 uNewEsp = (uint16_t)uNewEsp;
3501 }
3502
3503 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3504
3505 /* Check that there is sufficient space for the stack frame. */
3506 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3507 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3508 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3509 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
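        /* Frame: [err,] EIP/IP, CS, (E)FLAGS, (E)SP, SS, plus ES/DS/FS/GS when coming from V8086 mode; entries are 4 bytes for 32-bit gates and 2 bytes for 16-bit ones. */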
3510
3511 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3512 {
3513 if ( uNewEsp - 1 > cbLimitSS
3514 || uNewEsp < cbStackFrame)
3515 {
3516 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3517 u8Vector, NewSS, uNewEsp, cbStackFrame));
3518 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3519 }
3520 }
3521 else
3522 {
3523 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3524 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3525 {
3526 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3527 u8Vector, NewSS, uNewEsp, cbStackFrame));
3528 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3529 }
3530 }
3531
3532 /*
3533 * Start making changes.
3534 */
3535
3536 /* Set the new CPL so that stack accesses use it. */
3537 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3538 IEM_SET_CPL(pVCpu, uNewCpl);
3539
3540 /* Create the stack frame. */
3541 uint8_t bUnmapInfoStackFrame;
3542 RTPTRUNION uStackFrame;
3543 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3544 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3545 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3546 if (rcStrict != VINF_SUCCESS)
3547 return rcStrict;
3548 if (f32BitGate)
3549 {
3550 if (fFlags & IEM_XCPT_FLAGS_ERR)
3551 *uStackFrame.pu32++ = uErr;
3552 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3553 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3554 uStackFrame.pu32[2] = fEfl;
3555 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3556 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3557 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3558 if (fEfl & X86_EFL_VM)
3559 {
3560 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3561 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3562 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3563 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3564 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3565 }
3566 }
3567 else
3568 {
3569 if (fFlags & IEM_XCPT_FLAGS_ERR)
3570 *uStackFrame.pu16++ = uErr;
3571 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3572 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3573 uStackFrame.pu16[2] = fEfl;
3574 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3575 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3576 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3577 if (fEfl & X86_EFL_VM)
3578 {
3579 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3580 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3581 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3582 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3583 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3584 }
3585 }
3586 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3587 if (rcStrict != VINF_SUCCESS)
3588 return rcStrict;
3589
3590 /* Mark the selectors 'accessed' (hope this is the correct time). */
3591        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3592 * after pushing the stack frame? (Write protect the gdt + stack to
3593 * find out.) */
3594 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3595 {
3596 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3597 if (rcStrict != VINF_SUCCESS)
3598 return rcStrict;
3599 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3600 }
3601
3602 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3603 {
3604 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3605 if (rcStrict != VINF_SUCCESS)
3606 return rcStrict;
3607 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3608 }
3609
3610 /*
3611         * Start committing the register changes (joins with the DPL=CPL branch).
3612 */
3613 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3614 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3615 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3616 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3617 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3618 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3619 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3620 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3621 * SP is loaded).
3622 * Need to check the other combinations too:
3623 * - 16-bit TSS, 32-bit handler
3624 * - 32-bit TSS, 16-bit handler */
3625 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3626 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3627 else
3628 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3629
3630 if (fEfl & X86_EFL_VM)
3631 {
3632 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3633 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3634 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3635 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3636 }
3637 }
3638 /*
3639 * Same privilege, no stack change and smaller stack frame.
3640 */
3641 else
3642 {
3643 uint64_t uNewRsp;
3644 uint8_t bUnmapInfoStackFrame;
3645 RTPTRUNION uStackFrame;
3646 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
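        /* Frame: [err,] EIP/IP, CS, (E)FLAGS; entries are 4 bytes for 32-bit gates and 2 bytes for 16-bit ones. */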
3647 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3648 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3649 if (rcStrict != VINF_SUCCESS)
3650 return rcStrict;
3651
3652 if (f32BitGate)
3653 {
3654 if (fFlags & IEM_XCPT_FLAGS_ERR)
3655 *uStackFrame.pu32++ = uErr;
3656 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3657 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3658 uStackFrame.pu32[2] = fEfl;
3659 }
3660 else
3661 {
3662 if (fFlags & IEM_XCPT_FLAGS_ERR)
3663 *uStackFrame.pu16++ = uErr;
3664 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3665 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3666 uStackFrame.pu16[2] = fEfl;
3667 }
3668 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3669 if (rcStrict != VINF_SUCCESS)
3670 return rcStrict;
3671
3672 /* Mark the CS selector as 'accessed'. */
3673 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3674 {
3675 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3676 if (rcStrict != VINF_SUCCESS)
3677 return rcStrict;
3678 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3679 }
3680
3681 /*
3682 * Start committing the register changes (joins with the other branch).
3683 */
3684 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3685 }
3686
3687 /* ... register committing continues. */
3688 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3689 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3690 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3691 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3692 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3693 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3694
3695 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3696 fEfl &= ~fEflToClear;
3697 IEMMISC_SET_EFL(pVCpu, fEfl);
3698
3699 if (fFlags & IEM_XCPT_FLAGS_CR2)
3700 pVCpu->cpum.GstCtx.cr2 = uCr2;
3701
3702 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3703 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3704
3705 /* Make sure the execution flags are correct. */
3706 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3707 if (fExecNew != pVCpu->iem.s.fExec)
3708 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3709 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3710 pVCpu->iem.s.fExec = fExecNew;
3711 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3712
3713 /*
3714     * Deal with debug events that follow the exception and clear the inhibit flags.
3715 */
3716 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3717 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
3718 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3719 else
3720 {
3721 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
3722 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
3723 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3724 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3725 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
3726 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3727 return iemRaiseDebugException(pVCpu);
3728 }
3729
3730 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3731}
3732
3733
3734/**
3735 * Implements exceptions and interrupts for long mode.
3736 *
3737 * @returns VBox strict status code.
3738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3739 * @param cbInstr The number of bytes to offset rIP by in the return
3740 * address.
3741 * @param u8Vector The interrupt / exception vector number.
3742 * @param fFlags The flags.
3743 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3744 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3745 */
3746static VBOXSTRICTRC
3747iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3748 uint8_t cbInstr,
3749 uint8_t u8Vector,
3750 uint32_t fFlags,
3751 uint16_t uErr,
3752 uint64_t uCr2) RT_NOEXCEPT
3753{
3754 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3755
3756 /*
3757 * Read the IDT entry.
3758 */
3759 uint16_t offIdt = (uint16_t)u8Vector << 4;
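    /* Long mode IDT entries are 16 bytes each, hence the shift by 4. */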
3760 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3761 {
3762 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3763 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3764 }
3765 X86DESC64 Idte;
3766#ifdef _MSC_VER /* Shut up silly compiler warning. */
3767 Idte.au64[0] = 0;
3768 Idte.au64[1] = 0;
3769#endif
3770 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3771 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3772 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3773 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3774 {
3775 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3776 return rcStrict;
3777 }
3778 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3779 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3780 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3781
3782 /*
3783 * Check the descriptor type, DPL and such.
3784 * ASSUMES this is done in the same order as described for call-gate calls.
3785 */
3786 if (Idte.Gate.u1DescType)
3787 {
3788 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3789 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3790 }
3791 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3792 switch (Idte.Gate.u4Type)
3793 {
3794 case AMD64_SEL_TYPE_SYS_INT_GATE:
3795 fEflToClear |= X86_EFL_IF;
3796 break;
3797 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3798 break;
3799
3800 default:
3801 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3802 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3803 }
3804
3805 /* Check DPL against CPL if applicable. */
3806 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3807 {
3808 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3809 {
3810 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3811 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3812 }
3813 }
3814
3815 /* Is it there? */
3816 if (!Idte.Gate.u1Present)
3817 {
3818 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3819 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3820 }
3821
3822 /* A null CS is bad. */
3823 RTSEL NewCS = Idte.Gate.u16Sel;
3824 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3825 {
3826 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3827 return iemRaiseGeneralProtectionFault0(pVCpu);
3828 }
3829
3830 /* Fetch the descriptor for the new CS. */
3831 IEMSELDESC DescCS;
3832 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3833 if (rcStrict != VINF_SUCCESS)
3834 {
3835 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3836 return rcStrict;
3837 }
3838
3839 /* Must be a 64-bit code segment. */
3840 if (!DescCS.Long.Gen.u1DescType)
3841 {
3842 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3843 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3844 }
3845 if ( !DescCS.Long.Gen.u1Long
3846 || DescCS.Long.Gen.u1DefBig
3847 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3848 {
3849 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3850 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3851 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3852 }
3853
3854 /* Don't allow lowering the privilege level. For non-conforming CS
3855 selectors, the CS.DPL sets the privilege level the trap/interrupt
3856 handler runs at. For conforming CS selectors, the CPL remains
3857 unchanged, but the CS.DPL must be <= CPL. */
3858 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3859 * when CPU in Ring-0. Result \#GP? */
3860 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3861 {
3862 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3863 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3864 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3865 }
3866
3867
3868 /* Make sure the selector is present. */
3869 if (!DescCS.Legacy.Gen.u1Present)
3870 {
3871 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3872 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3873 }
3874
3875 /* Check that the new RIP is canonical. */
3876 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3877 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3878 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3879 if (!IEM_IS_CANONICAL(uNewRip))
3880 {
3881 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3882 return iemRaiseGeneralProtectionFault0(pVCpu);
3883 }
3884
3885 /*
3886 * If the privilege level changes or if the IST isn't zero, we need to get
3887 * a new stack from the TSS.
3888 */
3889 uint64_t uNewRsp;
3890 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3891 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3892 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3893 || Idte.Gate.u3IST != 0)
3894 {
3895 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3896 if (rcStrict != VINF_SUCCESS)
3897 return rcStrict;
3898 }
3899 else
3900 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3901 uNewRsp &= ~(uint64_t)0xf;
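    /* In long mode the stack pointer is aligned down to a 16-byte boundary before the frame is pushed. */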
3902
3903 /*
3904 * Calc the flag image to push.
3905 */
3906 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3907 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3908 fEfl &= ~X86_EFL_RF;
3909 else
3910 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3911
3912 /*
3913 * Start making changes.
3914 */
3915 /* Set the new CPL so that stack accesses use it. */
3916 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3917 IEM_SET_CPL(pVCpu, uNewCpl);
3918/** @todo Setting CPL this early seems wrong as it would affect any errors we
3919 *        raise while accessing the stack and (?) GDT/LDT... */
3920
3921 /* Create the stack frame. */
3922 uint8_t bUnmapInfoStackFrame;
3923 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
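    /* Five qwords: RIP, CS, RFLAGS, RSP and SS, plus an optional error code qword. */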
3924 RTPTRUNION uStackFrame;
3925 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3926 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3927 if (rcStrict != VINF_SUCCESS)
3928 return rcStrict;
3929
3930 if (fFlags & IEM_XCPT_FLAGS_ERR)
3931 *uStackFrame.pu64++ = uErr;
3932 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3933 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3934 uStackFrame.pu64[2] = fEfl;
3935 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3936 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3937 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3938 if (rcStrict != VINF_SUCCESS)
3939 return rcStrict;
3940
3941    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3942    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3943 * after pushing the stack frame? (Write protect the gdt + stack to
3944 * find out.) */
3945 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3946 {
3947 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3948 if (rcStrict != VINF_SUCCESS)
3949 return rcStrict;
3950 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3951 }
3952
3953 /*
3954     * Start committing the register changes.
3955 */
3956    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3957 * hidden registers when interrupting 32-bit or 16-bit code! */
3958 if (uNewCpl != uOldCpl)
3959 {
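        /* A CPL change in long mode loads SS with a NULL selector carrying the new CPL as RPL. */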
3960 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3961 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3962 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3963 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3964 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3965 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3966 }
3967 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3968 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3969 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3970 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3971 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3972 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3973 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3974 pVCpu->cpum.GstCtx.rip = uNewRip;
3975
3976 fEfl &= ~fEflToClear;
3977 IEMMISC_SET_EFL(pVCpu, fEfl);
3978
3979 if (fFlags & IEM_XCPT_FLAGS_CR2)
3980 pVCpu->cpum.GstCtx.cr2 = uCr2;
3981
3982 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3983 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3984
3985 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
3986
3987 /*
3988     * Deal with debug events that follow the exception and clear the inhibit flags.
3989 */
3990 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3991 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
3992 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3993 else
3994 {
3995 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
3996 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
3997 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3998 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3999 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4000 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4001 return iemRaiseDebugException(pVCpu);
4002 }
4003
4004 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4005}
4006
4007
4008/**
4009 * Implements exceptions and interrupts.
4010 *
4011 * All exceptions and interrupts go thru this function!
4012 *
4013 * @returns VBox strict status code.
4014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4015 * @param cbInstr The number of bytes to offset rIP by in the return
4016 * address.
4017 * @param u8Vector The interrupt / exception vector number.
4018 * @param fFlags The flags.
4019 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4020 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4021 */
4022VBOXSTRICTRC
4023iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4024 uint8_t cbInstr,
4025 uint8_t u8Vector,
4026 uint32_t fFlags,
4027 uint16_t uErr,
4028 uint64_t uCr2) RT_NOEXCEPT
4029{
4030 /*
4031 * Get all the state that we might need here.
4032 */
4033 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4034 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4035
4036#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4037 /*
4038 * Flush prefetch buffer
4039 */
4040 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4041#endif
4042
4043 /*
4044 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4045 */
4046 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4047 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4048 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4049 | IEM_XCPT_FLAGS_BP_INSTR
4050 | IEM_XCPT_FLAGS_ICEBP_INSTR
4051 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4052 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4053 {
4054 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4055 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4056 u8Vector = X86_XCPT_GP;
4057 uErr = 0;
4058 }
4059
4060 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4061#ifdef DBGFTRACE_ENABLED
4062 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4063 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4064 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4065#endif
4066
4067 /*
4068 * Check if DBGF wants to intercept the exception.
4069 */
4070 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4071 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4072 { /* likely */ }
4073 else
4074 {
4075 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4076 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4077 if (rcStrict != VINF_SUCCESS)
4078 return rcStrict;
4079 }
4080
4081 /*
4082 * Evaluate whether NMI blocking should be in effect.
4083 * Normally, NMI blocking is in effect whenever we inject an NMI.
4084 */
4085 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4086 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4087
4088#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4089 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4090 {
4091 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4092 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4093 return rcStrict0;
4094
4095 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4096 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4097 {
4098 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4099 fBlockNmi = false;
4100 }
4101 }
4102#endif
4103
4104#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4105 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4106 {
4107 /*
4108 * If the event is being injected as part of VMRUN, it isn't subject to event
4109 * intercepts in the nested-guest. However, secondary exceptions that occur
4110 * during injection of any event -are- subject to exception intercepts.
4111 *
4112 * See AMD spec. 15.20 "Event Injection".
4113 */
4114 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4115 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4116 else
4117 {
4118 /*
4119 * Check and handle if the event being raised is intercepted.
4120 */
4121 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4122 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4123 return rcStrict0;
4124 }
4125 }
4126#endif
4127
4128 /*
4129 * Set NMI blocking if necessary.
4130 */
4131 if (fBlockNmi)
4132 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4133
4134 /*
4135 * Do recursion accounting.
4136 */
4137 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4138 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4139 if (pVCpu->iem.s.cXcptRecursions == 0)
4140 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4141 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4142 else
4143 {
4144 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4145 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4146 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4147
4148 if (pVCpu->iem.s.cXcptRecursions >= 4)
4149 {
4150#ifdef DEBUG_bird
4151 AssertFailed();
4152#endif
4153 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4154 }
4155
4156 /*
4157 * Evaluate the sequence of recurring events.
4158 */
4159 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4160 NULL /* pXcptRaiseInfo */);
4161 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4162 { /* likely */ }
4163 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4164 {
4165 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4166 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4167 u8Vector = X86_XCPT_DF;
4168 uErr = 0;
4169#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4170 /* VMX nested-guest #DF intercept needs to be checked here. */
4171 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4172 {
4173 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4174 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4175 return rcStrict0;
4176 }
4177#endif
4178 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4179 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4180 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4181 }
4182 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4183 {
4184 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4185 return iemInitiateCpuShutdown(pVCpu);
4186 }
4187 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4188 {
4189 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4190 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4191 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4192 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4193 return VERR_EM_GUEST_CPU_HANG;
4194 }
4195 else
4196 {
4197 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4198 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4199 return VERR_IEM_IPE_9;
4200 }
4201
4202 /*
4203         * The 'EXT' bit is set when an exception occurs during delivery of an external
4204         * event (such as an interrupt or an earlier exception)[1]. The privileged software
4205         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4206         * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
4207 *
4208 * [1] - Intel spec. 6.13 "Error Code"
4209 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4210 * [3] - Intel Instruction reference for INT n.
4211 */
4212 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4213 && (fFlags & IEM_XCPT_FLAGS_ERR)
4214 && u8Vector != X86_XCPT_PF
4215 && u8Vector != X86_XCPT_DF)
4216 {
4217 uErr |= X86_TRAP_ERR_EXTERNAL;
4218 }
4219 }
4220
4221 pVCpu->iem.s.cXcptRecursions++;
4222 pVCpu->iem.s.uCurXcpt = u8Vector;
4223 pVCpu->iem.s.fCurXcpt = fFlags;
4224 pVCpu->iem.s.uCurXcptErr = uErr;
4225 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4226
4227 /*
4228 * Extensive logging.
4229 */
4230#if defined(LOG_ENABLED) && defined(IN_RING3)
4231 if (LogIs3Enabled())
4232 {
4233 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4234 char szRegs[4096];
4235 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4236 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4237 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4238 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4239 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4240 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4241 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4242 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4243 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4244 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4245 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4246 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4247 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4248 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4249 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4250 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4251 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4252 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4253 " efer=%016VR{efer}\n"
4254 " pat=%016VR{pat}\n"
4255 " sf_mask=%016VR{sf_mask}\n"
4256 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4257 " lstar=%016VR{lstar}\n"
4258 " star=%016VR{star} cstar=%016VR{cstar}\n"
4259 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4260 );
4261
4262 char szInstr[256];
4263 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4264 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4265 szInstr, sizeof(szInstr), NULL);
4266 Log3(("%s%s\n", szRegs, szInstr));
4267 }
4268#endif /* LOG_ENABLED */
4269
4270 /*
4271 * Stats.
4272 */
4273 uint64_t const uTimestamp = ASMReadTSC();
4274 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4275 {
4276 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4277 EMHistoryAddExit(pVCpu,
4278 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4279 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4280 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4281 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4282 }
4283 else
4284 {
4285 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4286 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4287 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4288 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4289 if (fFlags & IEM_XCPT_FLAGS_ERR)
4290 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4291 if (fFlags & IEM_XCPT_FLAGS_CR2)
4292 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4293 }
4294
4295 /*
4296     * Hack alert! Convert incoming debug events to silent ones on Intel.
4297 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4298 */
4299 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4300 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4301 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4302 { /* ignore */ }
4303 else
4304 {
4305 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4306 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4307 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4308 | CPUMCTX_DBG_HIT_DRX_SILENT;
4309 }
4310
4311 /*
4312     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4313 * to ensure that a stale TLB or paging cache entry will only cause one
4314 * spurious #PF.
4315 */
4316 if ( u8Vector == X86_XCPT_PF
4317 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4318 IEMTlbInvalidatePage(pVCpu, uCr2);
4319
4320 /*
4321 * Call the mode specific worker function.
4322 */
4323 VBOXSTRICTRC rcStrict;
4324 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4325 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4326 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4327 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4328 else
4329 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4330
4331 /* Flush the prefetch buffer. */
4332 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4333
4334 /*
4335 * Unwind.
4336 */
4337 pVCpu->iem.s.cXcptRecursions--;
4338 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4339 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4340 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4341 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4342 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4343 return rcStrict;
4344}
4345
4346#ifdef IEM_WITH_SETJMP
4347/**
4348 * See iemRaiseXcptOrInt. Will not return.
4349 */
4350DECL_NO_RETURN(void)
4351iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4352 uint8_t cbInstr,
4353 uint8_t u8Vector,
4354 uint32_t fFlags,
4355 uint16_t uErr,
4356 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4357{
4358 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4359 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4360}
4361#endif
4362
4363
4364/** \#DE - 00. */
4365VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4366{
4367 if (GCMIsInterceptingXcptDE(pVCpu))
4368 {
4369 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4370 if (rc == VINF_SUCCESS)
4371 {
4372 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4373            return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4374 }
4375 }
4376 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4377}
4378
4379
4380#ifdef IEM_WITH_SETJMP
4381/** \#DE - 00. */
4382DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4383{
4384 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4385}
4386#endif
4387
4388
4389/** \#DB - 01.
4390 * @note This automatically clears DR7.GD. */
4391VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4392{
4393 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4394 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4395 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4396}
4397
4398
4399/** \#BR - 05. */
4400VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4401{
4402 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4403}
4404
4405
4406/** \#UD - 06. */
4407VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4408{
4409 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4410}
4411
4412
4413#ifdef IEM_WITH_SETJMP
4414/** \#UD - 06. */
4415DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4416{
4417 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4418}
4419#endif
4420
4421
4422/** \#NM - 07. */
4423VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4424{
4425 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4426}
4427
4428
4429#ifdef IEM_WITH_SETJMP
4430/** \#NM - 07. */
4431DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4432{
4433 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4434}
4435#endif
4436
4437
4438/** \#TS(err) - 0a. */
4439VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4440{
4441 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4442}
4443
4444
4445/** \#TS(tr) - 0a. */
4446VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4447{
4448 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4449 pVCpu->cpum.GstCtx.tr.Sel, 0);
4450}
4451
4452
4453/** \#TS(0) - 0a. */
4454VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4455{
4456 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4457 0, 0);
4458}
4459
4460
4461/** \#TS(sel) - 0a. */
4462VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4463{
4464 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4465 uSel & X86_SEL_MASK_OFF_RPL, 0);
4466}
4467
4468
4469/** \#NP(err) - 0b. */
4470VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4471{
4472 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4473}
4474
4475
4476/** \#NP(sel) - 0b. */
4477VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4478{
4479 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4480 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4481 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4482 uSel & ~X86_SEL_RPL, 0);
4483}
4484
4485
4486/** \#SS(seg) - 0c. */
4487VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4488{
4489 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4490 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4491 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4492 uSel & ~X86_SEL_RPL, 0);
4493}
4494
4495
4496/** \#SS(err) - 0c. */
4497VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4498{
4499 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4500 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4501 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4502}
4503
4504
4505/** \#GP(n) - 0d. */
4506VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4507{
4508 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4509 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4510}
4511
4512
4513/** \#GP(0) - 0d. */
4514VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4515{
4516 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4517 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4518}
4519
4520#ifdef IEM_WITH_SETJMP
4521/** \#GP(0) - 0d. */
4522DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4523{
4524 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4525 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4526}
4527#endif
4528
4529
4530/** \#GP(sel) - 0d. */
4531VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4532{
4533 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4534 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4535 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4536 Sel & ~X86_SEL_RPL, 0);
4537}
4538
4539
4540/** \#GP(0) - 0d. */
4541VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4542{
4543 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4544 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4545}
4546
4547
4548/** \#GP(sel) - 0d. */
4549VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4550{
4551 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4552 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4553 NOREF(iSegReg); NOREF(fAccess);
4554 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4555 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4556}
4557
4558#ifdef IEM_WITH_SETJMP
4559/** \#GP(sel) - 0d, longjmp. */
4560DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4561{
4562 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4563 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4564 NOREF(iSegReg); NOREF(fAccess);
4565 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4566 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4567}
4568#endif
4569
4570/** \#GP(sel) - 0d. */
4571VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4572{
4573 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4574 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4575 NOREF(Sel);
4576 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4577}
4578
4579#ifdef IEM_WITH_SETJMP
4580/** \#GP(sel) - 0d, longjmp. */
4581DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4582{
4583 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4584 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4585 NOREF(Sel);
4586 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4587}
4588#endif
4589
4590
4591/** \#GP(sel) - 0d. */
4592VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4593{
4594 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4595 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4596 NOREF(iSegReg); NOREF(fAccess);
4597 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4598}
4599
4600#ifdef IEM_WITH_SETJMP
4601/** \#GP(sel) - 0d, longjmp. */
4602DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4603{
4604 NOREF(iSegReg); NOREF(fAccess);
4605 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4606}
4607#endif
4608
4609
4610/** \#PF(n) - 0e. */
4611VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4612{
4613 uint16_t uErr;
4614 switch (rc)
4615 {
4616 case VERR_PAGE_NOT_PRESENT:
4617 case VERR_PAGE_TABLE_NOT_PRESENT:
4618 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4619 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4620 uErr = 0;
4621 break;
4622
4623 case VERR_RESERVED_PAGE_TABLE_BITS:
4624 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4625 break;
4626
4627 default:
4628 AssertMsgFailed(("%Rrc\n", rc));
4629 RT_FALL_THRU();
4630 case VERR_ACCESS_DENIED:
4631 uErr = X86_TRAP_PF_P;
4632 break;
4633 }
4634
4635 if (IEM_GET_CPL(pVCpu) == 3)
4636 uErr |= X86_TRAP_PF_US;
4637
4638 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4639 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4640 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4641 uErr |= X86_TRAP_PF_ID;
4642
4643#if 0 /* This is so much nonsense, really. Why was it done like that? */
4644 /* Note! RW access callers reporting a WRITE protection fault, will clear
4645 the READ flag before calling. So, read-modify-write accesses (RW)
4646 can safely be reported as READ faults. */
4647 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4648 uErr |= X86_TRAP_PF_RW;
4649#else
4650 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4651 {
4652 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4653 /// (regardless of outcome of the comparison in the latter case).
4654 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4655 uErr |= X86_TRAP_PF_RW;
4656 }
4657#endif
4658
4659 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4660 of the memory operand rather than at the start of it. (Not sure what
4661 happens if it crosses a page boundary.) The current heuristic for
4662 this is to report the #PF for the last byte if the access is more than
4663 64 bytes. This is probably not correct, but we can work that out later;
4664 the main objective now is to get FXSAVE to work like for real hardware and
4665 make bs3-cpu-basic2 work. */
4666 if (cbAccess <= 64)
4667 { /* likely */ }
4668 else
4669 GCPtrWhere += cbAccess - 1;
4670
4671 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4672 uErr, GCPtrWhere);
4673}
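/* For reference, a rough standalone sketch of how such an error code is
   composed, using the architectural bit values rather than the
   X86_TRAP_PF_XXX constants (pfErrCompose is a made-up helper, not part of
   IEM, and fNxEnabled stands for the CR4.PAE && EFER.NXE check above):
   @code
   #include <stdbool.h>
   #include <stdint.h>

   #define PF_ERR_P    UINT16_C(0x0001)  // protection violation (page was present)
   #define PF_ERR_RW   UINT16_C(0x0002)  // the access was a write
   #define PF_ERR_US   UINT16_C(0x0004)  // the access came from CPL 3
   #define PF_ERR_RSVD UINT16_C(0x0008)  // reserved bits set in the paging structures
   #define PF_ERR_ID   UINT16_C(0x0010)  // instruction fetch

   static uint16_t pfErrCompose(bool fPresent, bool fWrite, bool fUser, bool fRsvd, bool fFetch, bool fNxEnabled)
   {
       uint16_t uErr = 0;
       if (fPresent)             uErr |= PF_ERR_P;
       if (fWrite)               uErr |= PF_ERR_RW;
       if (fUser)                uErr |= PF_ERR_US;
       if (fRsvd)                uErr |= PF_ERR_RSVD | PF_ERR_P;
       if (fFetch && fNxEnabled) uErr |= PF_ERR_ID;
       return uErr; // e.g. a CPL-3 write to a present read-only page gives 0x7 (P|RW|US)
   }
   @endcode */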
4674
4675#ifdef IEM_WITH_SETJMP
4676/** \#PF(n) - 0e, longjmp. */
4677DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4678 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4679{
4680 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4681}
4682#endif
4683
4684
4685/** \#MF(0) - 10. */
4686VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4687{
4688 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4689 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4690
4691 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4692 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4693 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4694}
4695
4696#ifdef IEM_WITH_SETJMP
4697/** \#MF(0) - 10, longjmp. */
4698DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4699{
4700 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4701}
4702#endif
4703
4704
4705/** \#AC(0) - 11. */
4706VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4707{
4708 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4709}
4710
4711#ifdef IEM_WITH_SETJMP
4712/** \#AC(0) - 11, longjmp. */
4713DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4714{
4715 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4716}
4717#endif
4718
4719
4720/** \#XF(0)/\#XM(0) - 19. */
4721VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4722{
4723 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4724}
4725
4726
4727#ifdef IEM_WITH_SETJMP
4728/** \#XF(0)/\#XM(0) - 19, longjmp. */
4729DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4730{
4731 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4732}
4733#endif
4734
4735
4736/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4737IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4738{
4739 NOREF(cbInstr);
4740 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4741}
4742
4743
4744/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4745IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4746{
4747 NOREF(cbInstr);
4748 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4749}
4750
4751
4752/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4753IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4754{
4755 NOREF(cbInstr);
4756 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4757}
4758
4759
4760/** @} */
4761
4762/** @name Common opcode decoders.
4763 * @{
4764 */
4765//#include <iprt/mem.h>
4766
4767/**
4768 * Used to add extra details about a stub case.
4769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4770 */
4771void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4772{
4773#if defined(LOG_ENABLED) && defined(IN_RING3)
4774 PVM pVM = pVCpu->CTX_SUFF(pVM);
4775 char szRegs[4096];
4776 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4777 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4778 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4779 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4780 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4781 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4782 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4783 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4784 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4785 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4786 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4787 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4788 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4789 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4790 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4791 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4792 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4793 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4794 " efer=%016VR{efer}\n"
4795 " pat=%016VR{pat}\n"
4796 " sf_mask=%016VR{sf_mask}\n"
4797 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4798 " lstar=%016VR{lstar}\n"
4799 " star=%016VR{star} cstar=%016VR{cstar}\n"
4800 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4801 );
4802
4803 char szInstr[256];
4804 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4805 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4806 szInstr, sizeof(szInstr), NULL);
4807
4808 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4809#else
4810 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4811#endif
4812}
4813
4814/** @} */
4815
4816
4817
4818/** @name Register Access.
4819 * @{
4820 */
4821
4822/**
4823 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4824 *
4825 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4826 * segment limit.
4827 *
4828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4829 * @param cbInstr Instruction size.
4830 * @param offNextInstr The offset of the next instruction.
4831 * @param enmEffOpSize Effective operand size.
4832 */
4833VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4834 IEMMODE enmEffOpSize) RT_NOEXCEPT
4835{
4836 switch (enmEffOpSize)
4837 {
4838 case IEMMODE_16BIT:
4839 {
4840 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
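 /* E.g. a two byte JMP rel8 (EB 05) at IP=0x0100 yields uNewIp = 0x0100 + 2 + 5 = 0x0107;
    the 16-bit arithmetic wraps at 64K by design. */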
4841 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4842 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4843 pVCpu->cpum.GstCtx.rip = uNewIp;
4844 else
4845 return iemRaiseGeneralProtectionFault0(pVCpu);
4846 break;
4847 }
4848
4849 case IEMMODE_32BIT:
4850 {
4851 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4852 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4853
4854 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4855 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4856 pVCpu->cpum.GstCtx.rip = uNewEip;
4857 else
4858 return iemRaiseGeneralProtectionFault0(pVCpu);
4859 break;
4860 }
4861
4862 case IEMMODE_64BIT:
4863 {
4864 Assert(IEM_IS_64BIT_CODE(pVCpu));
4865
4866 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4867 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4868 pVCpu->cpum.GstCtx.rip = uNewRip;
4869 else
4870 return iemRaiseGeneralProtectionFault0(pVCpu);
4871 break;
4872 }
4873
4874 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4875 }
4876
4877#ifndef IEM_WITH_CODE_TLB
4878 /* Flush the prefetch buffer. */
4879 pVCpu->iem.s.cbOpcode = cbInstr;
4880#endif
4881
4882 /*
4883 * Clear RF and finish the instruction (maybe raise #DB).
4884 */
4885 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4886}
4887
4888
4889/**
4890 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4891 *
4892 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4893 * segment limit.
4894 *
4895 * @returns Strict VBox status code.
4896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4897 * @param cbInstr Instruction size.
4898 * @param offNextInstr The offset of the next instruction.
4899 */
4900VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4901{
4902 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4903
4904 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4905 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4906 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4907 pVCpu->cpum.GstCtx.rip = uNewIp;
4908 else
4909 return iemRaiseGeneralProtectionFault0(pVCpu);
4910
4911#ifndef IEM_WITH_CODE_TLB
4912 /* Flush the prefetch buffer. */
4913 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4914#endif
4915
4916 /*
4917 * Clear RF and finish the instruction (maybe raise #DB).
4918 */
4919 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4920}
4921
4922
4923/**
4924 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4925 *
4926 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4927 * segment limit.
4928 *
4929 * @returns Strict VBox status code.
4930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4931 * @param cbInstr Instruction size.
4932 * @param offNextInstr The offset of the next instruction.
4933 * @param enmEffOpSize Effective operand size.
4934 */
4935VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4936 IEMMODE enmEffOpSize) RT_NOEXCEPT
4937{
4938 if (enmEffOpSize == IEMMODE_32BIT)
4939 {
4940 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4941
4942 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4943 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4944 pVCpu->cpum.GstCtx.rip = uNewEip;
4945 else
4946 return iemRaiseGeneralProtectionFault0(pVCpu);
4947 }
4948 else
4949 {
4950 Assert(enmEffOpSize == IEMMODE_64BIT);
4951
4952 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4953 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4954 pVCpu->cpum.GstCtx.rip = uNewRip;
4955 else
4956 return iemRaiseGeneralProtectionFault0(pVCpu);
4957 }
4958
4959#ifndef IEM_WITH_CODE_TLB
4960 /* Flush the prefetch buffer. */
4961 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4962#endif
4963
4964 /*
4965 * Clear RF and finish the instruction (maybe raise #DB).
4966 */
4967 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4968}
4969
4970/** @} */
4971
4972
4973/** @name FPU access and helpers.
4974 *
4975 * @{
4976 */
4977
4978/**
4979 * Updates the x87.DS and FPUDP registers.
4980 *
4981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4982 * @param pFpuCtx The FPU context.
4983 * @param iEffSeg The effective segment register.
4984 * @param GCPtrEff The effective address relative to @a iEffSeg.
4985 */
4986DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4987{
4988 RTSEL sel;
4989 switch (iEffSeg)
4990 {
4991 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4992 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4993 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4994 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4995 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4996 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4997 default:
4998 AssertMsgFailed(("%d\n", iEffSeg));
4999 sel = pVCpu->cpum.GstCtx.ds.Sel;
5000 }
5001 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5002 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5003 {
5004 pFpuCtx->DS = 0;
5005 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
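 /* E.g. sel=0x1234 and GCPtrEff=0x0010 gives the real-mode linear address 0x12350. */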
5006 }
5007 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5008 {
5009 pFpuCtx->DS = sel;
5010 pFpuCtx->FPUDP = GCPtrEff;
5011 }
5012 else
5013 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5014}
5015
5016
5017/**
5018 * Rotates the stack registers in the push direction.
5019 *
5020 * @param pFpuCtx The FPU context.
5021 * @remarks This is a complete waste of time, but fxsave stores the registers in
5022 * stack order.
5023 */
5024DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5025{
5026 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5027 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5028 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5029 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5030 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5031 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5032 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5033 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5034 pFpuCtx->aRegs[0].r80 = r80Tmp;
5035}
5036
5037
5038/**
5039 * Rotates the stack registers in the pop direction.
5040 *
5041 * @param pFpuCtx The FPU context.
5042 * @remarks This is a complete waste of time, but fxsave stores the registers in
5043 * stack order.
5044 */
5045DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5046{
5047 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5048 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5049 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5050 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5051 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5052 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5053 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5054 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5055 pFpuCtx->aRegs[7].r80 = r80Tmp;
5056}
5057
5058
5059/**
5060 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5061 * exception prevents it.
5062 *
5063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5064 * @param pResult The FPU operation result to push.
5065 * @param pFpuCtx The FPU context.
5066 */
5067static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5068{
5069 /* Update FSW and bail if there are pending exceptions afterwards. */
5070 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5071 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
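 /* The check below relies on the FSW exception flags (IE/DE/ZE, bits 0-2)
    sharing bit positions with the corresponding FCW mask bits (IM/DM/ZM),
    so ANDing with the inverted FCW bits leaves only unmasked exceptions. */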
5072 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5073 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5074 {
5075 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5076 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5077 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5078 pFpuCtx->FSW = fFsw;
5079 return;
5080 }
5081
5082 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
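 /* (TOP + 7) & 7 is TOP - 1 modulo 8, i.e. the register slot a push would occupy. */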
5083 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5084 {
5085 /* All is fine, push the actual value. */
5086 pFpuCtx->FTW |= RT_BIT(iNewTop);
5087 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5088 }
5089 else if (pFpuCtx->FCW & X86_FCW_IM)
5090 {
5091 /* Masked stack overflow, push QNaN. */
5092 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5093 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5094 }
5095 else
5096 {
5097 /* Raise stack overflow, don't push anything. */
5098 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5099 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5100 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5102 return;
5103 }
5104
5105 fFsw &= ~X86_FSW_TOP_MASK;
5106 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5107 pFpuCtx->FSW = fFsw;
5108
5109 iemFpuRotateStackPush(pFpuCtx);
5110 RT_NOREF(pVCpu);
5111}
5112
5113
5114/**
5115 * Stores a result in a FPU register and updates the FSW and FTW.
5116 *
5117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5118 * @param pFpuCtx The FPU context.
5119 * @param pResult The result to store.
5120 * @param iStReg Which FPU register to store it in.
5121 */
5122static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5123{
5124 Assert(iStReg < 8);
5125 uint16_t fNewFsw = pFpuCtx->FSW;
5126 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
5127 fNewFsw &= ~X86_FSW_C_MASK;
5128 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5129 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5130 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5131 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5132 pFpuCtx->FSW = fNewFsw;
5133 pFpuCtx->FTW |= RT_BIT(iReg);
5134 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5135 RT_NOREF(pVCpu);
5136}
5137
5138
5139/**
5140 * Only updates the FPU status word (FSW) with the result of the current
5141 * instruction.
5142 *
5143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5144 * @param pFpuCtx The FPU context.
5145 * @param u16FSW The FSW output of the current instruction.
5146 */
5147static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5148{
5149 uint16_t fNewFsw = pFpuCtx->FSW;
5150 fNewFsw &= ~X86_FSW_C_MASK;
5151 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5152 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5153 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5154 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5155 pFpuCtx->FSW = fNewFsw;
5156 RT_NOREF(pVCpu);
5157}
5158
5159
5160/**
5161 * Pops one item off the FPU stack if no pending exception prevents it.
5162 *
5163 * @param pFpuCtx The FPU context.
5164 */
5165static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5166{
5167 /* Check pending exceptions. */
5168 uint16_t uFSW = pFpuCtx->FSW;
5169 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5170 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5171 return;
5172
5173 /* Pop one entry: TOP = (TOP + 1) & 7. */
5174 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5175 uFSW &= ~X86_FSW_TOP_MASK;
5176 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5177 pFpuCtx->FSW = uFSW;
5178
5179 /* Mark the previous ST0 as empty. */
5180 iOldTop >>= X86_FSW_TOP_SHIFT;
5181 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5182
5183 /* Rotate the registers. */
5184 iemFpuRotateStackPop(pFpuCtx);
5185}
5186
5187
5188/**
5189 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5190 *
5191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5192 * @param pResult The FPU operation result to push.
5193 * @param uFpuOpcode The FPU opcode value.
5194 */
5195void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5196{
5197 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5198 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5199 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5200}
5201
5202
5203/**
5204 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5205 * and sets FPUDP and FPUDS.
5206 *
5207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5208 * @param pResult The FPU operation result to push.
5209 * @param iEffSeg The effective segment register.
5210 * @param GCPtrEff The effective address relative to @a iEffSeg.
5211 * @param uFpuOpcode The FPU opcode value.
5212 */
5213void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5214 uint16_t uFpuOpcode) RT_NOEXCEPT
5215{
5216 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5217 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5218 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5219 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5220}
5221
5222
5223/**
5224 * Replace ST0 with the first value and push the second onto the FPU stack,
5225 * unless a pending exception prevents it.
5226 *
5227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5228 * @param pResult The FPU operation result to store and push.
5229 * @param uFpuOpcode The FPU opcode value.
5230 */
5231void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5232{
5233 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5234 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5235
5236 /* Update FSW and bail if there are pending exceptions afterwards. */
5237 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5238 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5239 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5240 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5241 {
5242 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5243 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5244 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5245 pFpuCtx->FSW = fFsw;
5246 return;
5247 }
5248
5249 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5250 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5251 {
5252 /* All is fine, push the actual value. */
5253 pFpuCtx->FTW |= RT_BIT(iNewTop);
5254 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5255 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5256 }
5257 else if (pFpuCtx->FCW & X86_FCW_IM)
5258 {
5259 /* Masked stack overflow, push QNaN. */
5260 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5261 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5262 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5263 }
5264 else
5265 {
5266 /* Raise stack overflow, don't push anything. */
5267 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5268 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5269 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5270 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5271 return;
5272 }
5273
5274 fFsw &= ~X86_FSW_TOP_MASK;
5275 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5276 pFpuCtx->FSW = fFsw;
5277
5278 iemFpuRotateStackPush(pFpuCtx);
5279}
5280
5281
5282/**
5283 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5284 * FOP.
5285 *
5286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5287 * @param pResult The result to store.
5288 * @param iStReg Which FPU register to store it in.
5289 * @param uFpuOpcode The FPU opcode value.
5290 */
5291void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5292{
5293 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5294 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5295 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5296}
5297
5298
5299/**
5300 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5301 * FOP, and then pops the stack.
5302 *
5303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5304 * @param pResult The result to store.
5305 * @param iStReg Which FPU register to store it in.
5306 * @param uFpuOpcode The FPU opcode value.
5307 */
5308void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5309{
5310 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5311 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5312 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5313 iemFpuMaybePopOne(pFpuCtx);
5314}
5315
5316
5317/**
5318 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5319 * FPUDP, and FPUDS.
5320 *
5321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5322 * @param pResult The result to store.
5323 * @param iStReg Which FPU register to store it in.
5324 * @param iEffSeg The effective memory operand selector register.
5325 * @param GCPtrEff The effective memory operand offset.
5326 * @param uFpuOpcode The FPU opcode value.
5327 */
5328void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5329 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5330{
5331 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5332 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5333 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5334 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5335}
5336
5337
5338/**
5339 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5340 * FPUDP, and FPUDS, and then pops the stack.
5341 *
5342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5343 * @param pResult The result to store.
5344 * @param iStReg Which FPU register to store it in.
5345 * @param iEffSeg The effective memory operand selector register.
5346 * @param GCPtrEff The effective memory operand offset.
5347 * @param uFpuOpcode The FPU opcode value.
5348 */
5349void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5350 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5351{
5352 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5353 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5354 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5355 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5356 iemFpuMaybePopOne(pFpuCtx);
5357}
5358
5359
5360/**
5361 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5362 *
5363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5364 * @param uFpuOpcode The FPU opcode value.
5365 */
5366void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5367{
5368 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5369 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5370}
5371
5372
5373/**
5374 * Updates the FSW, FOP, FPUIP, and FPUCS.
5375 *
5376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5377 * @param u16FSW The FSW from the current instruction.
5378 * @param uFpuOpcode The FPU opcode value.
5379 */
5380void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5381{
5382 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5383 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5384 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5385}
5386
5387
5388/**
5389 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5390 *
5391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5392 * @param u16FSW The FSW from the current instruction.
5393 * @param uFpuOpcode The FPU opcode value.
5394 */
5395void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5396{
5397 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5398 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5399 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5400 iemFpuMaybePopOne(pFpuCtx);
5401}
5402
5403
5404/**
5405 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5406 *
5407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5408 * @param u16FSW The FSW from the current instruction.
5409 * @param iEffSeg The effective memory operand selector register.
5410 * @param GCPtrEff The effective memory operand offset.
5411 * @param uFpuOpcode The FPU opcode value.
5412 */
5413void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5414{
5415 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5416 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5417 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5418 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5419}
5420
5421
5422/**
5423 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5424 *
5425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5426 * @param u16FSW The FSW from the current instruction.
5427 * @param uFpuOpcode The FPU opcode value.
5428 */
5429void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5430{
5431 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5432 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5433 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5434 iemFpuMaybePopOne(pFpuCtx);
5435 iemFpuMaybePopOne(pFpuCtx);
5436}
5437
5438
5439/**
5440 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5441 *
5442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5443 * @param u16FSW The FSW from the current instruction.
5444 * @param iEffSeg The effective memory operand selector register.
5445 * @param GCPtrEff The effective memory operand offset.
5446 * @param uFpuOpcode The FPU opcode value.
5447 */
5448void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5449 uint16_t uFpuOpcode) RT_NOEXCEPT
5450{
5451 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5452 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5453 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5454 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5455 iemFpuMaybePopOne(pFpuCtx);
5456}
5457
5458
5459/**
5460 * Worker routine for raising an FPU stack underflow exception.
5461 *
5462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5463 * @param pFpuCtx The FPU context.
5464 * @param iStReg The stack register being accessed.
5465 */
5466static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5467{
5468 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5469 if (pFpuCtx->FCW & X86_FCW_IM)
5470 {
5471 /* Masked underflow. */
5472 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5473 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5474 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5475 if (iStReg != UINT8_MAX)
5476 {
5477 pFpuCtx->FTW |= RT_BIT(iReg);
5478 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5479 }
5480 }
5481 else
5482 {
5483 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5484 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5485 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5486 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5487 }
5488 RT_NOREF(pVCpu);
5489}
5490
5491
5492/**
5493 * Raises a FPU stack underflow exception.
5494 *
5495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5496 * @param iStReg The destination register that should be loaded
5497 * with QNaN if \#IS is masked. Specify
5498 * UINT8_MAX if none (like for fcom).
5499 * @param uFpuOpcode The FPU opcode value.
5500 */
5501void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5502{
5503 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5504 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5505 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5506}
5507
5508
5509void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5510{
5511 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5512 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5513 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5514 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5515}
5516
5517
5518void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5519{
5520 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5521 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5522 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5523 iemFpuMaybePopOne(pFpuCtx);
5524}
5525
5526
5527void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5528 uint16_t uFpuOpcode) RT_NOEXCEPT
5529{
5530 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5531 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5532 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5533 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5534 iemFpuMaybePopOne(pFpuCtx);
5535}
5536
5537
5538void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5539{
5540 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5541 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5542 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5543 iemFpuMaybePopOne(pFpuCtx);
5544 iemFpuMaybePopOne(pFpuCtx);
5545}
5546
5547
5548void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5549{
5550 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5551 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5552
5553 if (pFpuCtx->FCW & X86_FCW_IM)
5554 {
5555 /* Masked underflow - Push QNaN. */
5556 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5557 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5558 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5559 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5560 pFpuCtx->FTW |= RT_BIT(iNewTop);
5561 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5562 iemFpuRotateStackPush(pFpuCtx);
5563 }
5564 else
5565 {
5566 /* Exception pending - don't change TOP or the register stack. */
5567 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5568 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5569 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5570 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5571 }
5572}
5573
5574
5575void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5576{
5577 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5578 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5579
5580 if (pFpuCtx->FCW & X86_FCW_IM)
5581 {
5582 /* Masked underflow - Push QNaN. */
5583 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5584 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5585 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5586 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5587 pFpuCtx->FTW |= RT_BIT(iNewTop);
5588 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5589 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5590 iemFpuRotateStackPush(pFpuCtx);
5591 }
5592 else
5593 {
5594 /* Exception pending - don't change TOP or the register stack. */
5595 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5596 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5597 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5598 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5599 }
5600}
5601
5602
5603/**
5604 * Worker routine for raising an FPU stack overflow exception on a push.
5605 *
5606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5607 * @param pFpuCtx The FPU context.
5608 */
5609static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5610{
5611 if (pFpuCtx->FCW & X86_FCW_IM)
5612 {
5613 /* Masked overflow. */
5614 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5615 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5616 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
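 /* C1=1 here signals stack overflow; the underflow paths leave C1 clear. */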
5617 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5618 pFpuCtx->FTW |= RT_BIT(iNewTop);
5619 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5620 iemFpuRotateStackPush(pFpuCtx);
5621 }
5622 else
5623 {
5624 /* Exception pending - don't change TOP or the register stack. */
5625 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5626 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5627 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5628 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5629 }
5630 RT_NOREF(pVCpu);
5631}
5632
5633
5634/**
5635 * Raises a FPU stack overflow exception on a push.
5636 *
5637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5638 * @param uFpuOpcode The FPU opcode value.
5639 */
5640void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5641{
5642 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5643 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5644 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5645}
5646
5647
5648/**
5649 * Raises a FPU stack overflow exception on a push with a memory operand.
5650 *
5651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5652 * @param iEffSeg The effective memory operand selector register.
5653 * @param GCPtrEff The effective memory operand offset.
5654 * @param uFpuOpcode The FPU opcode value.
5655 */
5656void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5657{
5658 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5659 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5660 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5661 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5662}
5663
5664/** @} */
5665
5666
5667/** @name Memory access.
5668 *
5669 * @{
5670 */
5671
5672#undef LOG_GROUP
5673#define LOG_GROUP LOG_GROUP_IEM_MEM
5674
5675/**
5676 * Updates the IEMCPU::cbWritten counter if applicable.
5677 *
5678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5679 * @param fAccess The access being accounted for.
5680 * @param cbMem The access size.
5681 */
5682DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5683{
5684 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5685 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5686 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5687}
5688
5689
5690/**
5691 * Applies the segment limit, base and attributes.
5692 *
5693 * This may raise a \#GP or \#SS.
5694 *
5695 * @returns VBox strict status code.
5696 *
5697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5698 * @param fAccess The kind of access which is being performed.
5699 * @param iSegReg The index of the segment register to apply.
5700 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5701 * TSS, ++).
5702 * @param cbMem The access size.
5703 * @param pGCPtrMem Pointer to the guest memory address to apply
5704 * segmentation to. Input and output parameter.
5705 */
5706VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5707{
5708 if (iSegReg == UINT8_MAX)
5709 return VINF_SUCCESS;
5710
5711 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5712 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5713 switch (IEM_GET_CPU_MODE(pVCpu))
5714 {
5715 case IEMMODE_16BIT:
5716 case IEMMODE_32BIT:
5717 {
5718 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5719 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5720
5721 if ( pSel->Attr.n.u1Present
5722 && !pSel->Attr.n.u1Unusable)
5723 {
5724 Assert(pSel->Attr.n.u1DescType);
5725 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5726 {
5727 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5728 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5729 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5730
5731 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5732 {
5733 /** @todo CPL check. */
5734 }
5735
5736 /*
5737 * There are two kinds of data selectors, normal and expand down.
5738 */
5739 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5740 {
5741 if ( GCPtrFirst32 > pSel->u32Limit
5742 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5743 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5744 }
5745 else
5746 {
5747 /*
5748 * The upper boundary is defined by the B bit, not the G bit!
5749 */
5750 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5751 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5752 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5753 }
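 /* (For an expand-down segment with e.g. limit 0x0fff and B=1, only offsets
    0x1000 thru 0xffffffff pass the check above; anything at or below the
    limit faults.) */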
5754 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5755 }
5756 else
5757 {
5758 /*
5759 * A code selector can usually be used to read through; writing is
5760 * only permitted in real and V8086 mode.
5761 */
5762 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5763 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5764 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5765 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5766 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5767
5768 if ( GCPtrFirst32 > pSel->u32Limit
5769 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5770 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5771
5772 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5773 {
5774 /** @todo CPL check. */
5775 }
5776
5777 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5778 }
5779 }
5780 else
5781 return iemRaiseGeneralProtectionFault0(pVCpu);
5782 return VINF_SUCCESS;
5783 }
5784
5785 case IEMMODE_64BIT:
5786 {
5787 RTGCPTR GCPtrMem = *pGCPtrMem;
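 /* Only FS and GS have an architecturally meaningful base in 64-bit mode;
    CS, DS, ES and SS are treated as flat (base 0). */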
5788 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5789 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5790
5791 Assert(cbMem >= 1);
5792 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5793 return VINF_SUCCESS;
5794 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5795 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5796 return iemRaiseGeneralProtectionFault0(pVCpu);
5797 }
5798
5799 default:
5800 AssertFailedReturn(VERR_IEM_IPE_7);
5801 }
5802}
5803
5804
5805/**
5806 * Translates a virtual address to a physical address and checks if we
5807 * can access the page as specified.
5808 *
5809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5810 * @param GCPtrMem The virtual address.
5811 * @param cbAccess The access size, for raising \#PF correctly for
5812 * FXSAVE and such.
5813 * @param fAccess The intended access.
5814 * @param pGCPhysMem Where to return the physical address.
5815 */
5816VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5817 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5818{
5819 /** @todo Need a different PGM interface here. We're currently using
5820 * generic / REM interfaces. this won't cut it for R0. */
5821 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5822 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5823 * here. */
5824 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5825 PGMPTWALKFAST WalkFast;
5826 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
5827 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
5828 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
5829 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
5830 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
5831 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
5832 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5833 fQPage |= PGMQPAGE_F_USER_MODE;
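 /* E.g. a ring-3 data read with CR0.WP set queries with
    PGMQPAGE_F_READ | PGMQPAGE_F_USER_MODE; the CR0_WP0 bit is only added
    when CR0.WP is clear. */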
5834 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
5835 if (RT_SUCCESS(rc))
5836 {
5837 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
5838
5839 /* If the page is writable and does not have the no-exec bit set, all
5840 access is allowed. Otherwise we'll have to check more carefully... */
5841 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
5842 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
5843 || (WalkFast.fEffective & X86_PTE_RW)
5844 || ( ( IEM_GET_CPL(pVCpu) != 3
5845 || (fAccess & IEM_ACCESS_WHAT_SYS))
5846 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
5847 && ( (WalkFast.fEffective & X86_PTE_US)
5848 || IEM_GET_CPL(pVCpu) != 3
5849 || (fAccess & IEM_ACCESS_WHAT_SYS) )
5850 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
5851 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
5852 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5853 )
5854 );
5855
5856 /* PGMGstQueryPageFast sets the A & D bits. */
5857 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5858 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
5859
5860 *pGCPhysMem = WalkFast.GCPhys;
5861 return VINF_SUCCESS;
5862 }
5863
5864 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5865 /** @todo Check unassigned memory in unpaged mode. */
5866#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5867 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
5868 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5869#endif
5870 *pGCPhysMem = NIL_RTGCPHYS;
5871 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5872}
5873
5874#if 0 /*unused*/
5875/**
5876 * Looks up a memory mapping entry.
5877 *
5878 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5880 * @param pvMem The memory address.
5881 * @param fAccess The access to.
5882 */
5883DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5884{
5885 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5886 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5887 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5888 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5889 return 0;
5890 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5891 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5892 return 1;
5893 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5894 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5895 return 2;
5896 return VERR_NOT_FOUND;
5897}
5898#endif
5899
5900/**
5901 * Finds a free memmap entry when using iNextMapping doesn't work.
5902 *
5903 * @returns Memory mapping index, 1024 on failure.
5904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5905 */
5906static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5907{
5908 /*
5909 * The easy case.
5910 */
5911 if (pVCpu->iem.s.cActiveMappings == 0)
5912 {
5913 pVCpu->iem.s.iNextMapping = 1;
5914 return 0;
5915 }
5916
5917 /* There should be enough mappings for all instructions. */
5918 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5919
5920 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5921 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5922 return i;
5923
5924 AssertFailedReturn(1024);
5925}
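/*
 * Illustrative note (editorial, not from the original source): callers try the
 * iNextMapping hint first and only fall back to this scan when that slot is
 * already taken, roughly as iemMemMap does further down:
 *
 *     unsigned iMemMap = pVCpu->iem.s.iNextMapping;
 *     if (   iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
 *         || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
 *         iMemMap = iemMemMapFindFree(pVCpu);
 *
 * The 1024 return value is an out-of-range sentinel which the callers catch
 * with AssertLogRelMsgReturn / AssertLogRelMsgStmt.
 */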
5926
5927
5928/**
5929 * Commits a bounce buffer that needs writing back and unmaps it.
5930 *
5931 * @returns Strict VBox status code.
5932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5933 * @param iMemMap The index of the buffer to commit.
5934 * @param fPostponeFail Whether we can postpone write failures to ring-3.
5935 * Always false in ring-3, obviously.
5936 */
5937static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5938{
5939 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5940 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5941#ifdef IN_RING3
5942 Assert(!fPostponeFail);
5943 RT_NOREF_PV(fPostponeFail);
5944#endif
5945
5946 /*
5947 * Do the writing.
5948 */
5949 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5950 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5951 {
5952 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5953 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5954 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5955 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5956 {
5957 /*
5958 * Carefully and efficiently dealing with access handler return
5959 * codes makes this a little bloated.
5960 */
5961 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5962 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5963 pbBuf,
5964 cbFirst,
5965 PGMACCESSORIGIN_IEM);
5966 if (rcStrict == VINF_SUCCESS)
5967 {
5968 if (cbSecond)
5969 {
5970 rcStrict = PGMPhysWrite(pVM,
5971 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5972 pbBuf + cbFirst,
5973 cbSecond,
5974 PGMACCESSORIGIN_IEM);
5975 if (rcStrict == VINF_SUCCESS)
5976 { /* nothing */ }
5977 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5978 {
5979 LogEx(LOG_GROUP_IEM,
5980 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5981 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5982 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5983 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5984 }
5985#ifndef IN_RING3
5986 else if (fPostponeFail)
5987 {
5988 LogEx(LOG_GROUP_IEM,
5989 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5990 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5991 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5992 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5993 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5994 return iemSetPassUpStatus(pVCpu, rcStrict);
5995 }
5996#endif
5997 else
5998 {
5999 LogEx(LOG_GROUP_IEM,
6000 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6001 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6002 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6003 return rcStrict;
6004 }
6005 }
6006 }
6007 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6008 {
6009 if (!cbSecond)
6010 {
6011 LogEx(LOG_GROUP_IEM,
6012 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6013 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6014 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6015 }
6016 else
6017 {
6018 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6019 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6020 pbBuf + cbFirst,
6021 cbSecond,
6022 PGMACCESSORIGIN_IEM);
6023 if (rcStrict2 == VINF_SUCCESS)
6024 {
6025 LogEx(LOG_GROUP_IEM,
6026 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6027 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6028 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6029 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6030 }
6031 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6032 {
6033 LogEx(LOG_GROUP_IEM,
6034 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6035 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6036 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6037 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6038 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6039 }
6040#ifndef IN_RING3
6041 else if (fPostponeFail)
6042 {
6043 LogEx(LOG_GROUP_IEM,
6044 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6045 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6046 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6047 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6048 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6049 return iemSetPassUpStatus(pVCpu, rcStrict);
6050 }
6051#endif
6052 else
6053 {
6054 LogEx(LOG_GROUP_IEM,
6055 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6056 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6057 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6058 return rcStrict2;
6059 }
6060 }
6061 }
6062#ifndef IN_RING3
6063 else if (fPostponeFail)
6064 {
6065 LogEx(LOG_GROUP_IEM,
6066 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6067 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6068 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6069 if (!cbSecond)
6070 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6071 else
6072 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6073 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6074 return iemSetPassUpStatus(pVCpu, rcStrict);
6075 }
6076#endif
6077 else
6078 {
6079 LogEx(LOG_GROUP_IEM,
6080 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6081 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6082 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6083 return rcStrict;
6084 }
6085 }
6086 else
6087 {
6088 /*
6089 * No access handlers, much simpler.
6090 */
6091 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6092 if (RT_SUCCESS(rc))
6093 {
6094 if (cbSecond)
6095 {
6096 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6097 if (RT_SUCCESS(rc))
6098 { /* likely */ }
6099 else
6100 {
6101 LogEx(LOG_GROUP_IEM,
6102 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6103 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6104 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6105 return rc;
6106 }
6107 }
6108 }
6109 else
6110 {
6111 LogEx(LOG_GROUP_IEM,
6112 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6113 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6114 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6115 return rc;
6116 }
6117 }
6118 }
6119
6120#if defined(IEM_LOG_MEMORY_WRITES)
6121 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6122 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6123 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6124 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6125 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6126 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6127
6128 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6129 g_cbIemWrote = cbWrote;
6130 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6131#endif
6132
6133 /*
6134 * Free the mapping entry.
6135 */
6136 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6137 Assert(pVCpu->iem.s.cActiveMappings != 0);
6138 pVCpu->iem.s.cActiveMappings--;
6139 return VINF_SUCCESS;
6140}
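/*
 * Editorial summary (hedged): the commit above issues at most two physical
 * writes, one per guest page backing the bounce buffer (GCPhysFirst/cbFirst
 * and GCPhysSecond/cbSecond).  With fPostponeFail set (ring-0 only), a failing
 * write is not treated as fatal here; instead IEM_ACCESS_PENDING_R3_WRITE_1ST
 * and/or IEM_ACCESS_PENDING_R3_WRITE_2ND plus VMCPU_FF_IEM are set so the
 * write can be redone in ring-3, e.g. a caller might check:
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         ... return to ring-3 so the postponed write(s) get flushed ...
 */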
6141
6142
6143/**
6144 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6145 */
6146DECL_FORCE_INLINE(uint32_t)
6147iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6148{
6149 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6150 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6151 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6152 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6153}
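/*
 * Editorial note (hedged): the value returned here is a bit mask; the callers
 * below only merge the CPUMCTX_DBG_HIT_DRX_MASK and CPUMCTX_DBG_DBGF_MASK bits
 * into the guest EFLAGS shadow, along the lines of:
 *
 *     uint32_t const fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrMem, cbMem, fAccess);
 *     pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
 */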
6154
6155
6156/**
6157 * iemMemMap worker that deals with a request crossing pages.
6158 */
6159static VBOXSTRICTRC
6160iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6161 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6162{
6163 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6164 Assert(cbMem <= GUEST_PAGE_SIZE);
6165
6166 /*
6167 * Do the address translations.
6168 */
6169 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6170 RTGCPHYS GCPhysFirst;
6171 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6172 if (rcStrict != VINF_SUCCESS)
6173 return rcStrict;
6174 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6175
6176 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6177 RTGCPHYS GCPhysSecond;
6178 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6179 cbSecondPage, fAccess, &GCPhysSecond);
6180 if (rcStrict != VINF_SUCCESS)
6181 return rcStrict;
6182 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6183 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6184
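    /*
     * Worked example (editorial): with 4 KiB guest pages, an 8 byte access at a
     * linear address ending in 0xffe gives
     *     cbFirstPage  = 0x1000 - 0xffe = 2 bytes on the first page, and
     *     cbSecondPage = 8 - 2          = 6 bytes on the second page,
     * which is how the bounce buffer below gets filled and later committed.
     */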
6185 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6186
6187 /*
6188 * Check for data breakpoints.
6189 */
6190 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6191 { /* likely */ }
6192 else
6193 {
6194 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6195 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6196 cbSecondPage, fAccess);
6197 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6198 if (fDataBps > 1)
6199 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6200 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6201 }
6202
6203 /*
6204 * Read in the current memory content if it's a read, execute or partial
6205 * write access.
6206 */
6207 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6208
6209 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6210 {
6211 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6212 {
6213 /*
6214 * Must carefully deal with access handler status codes here,
6215 * makes the code a bit bloated.
6216 */
6217 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6218 if (rcStrict == VINF_SUCCESS)
6219 {
6220 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6221 if (rcStrict == VINF_SUCCESS)
6222 { /*likely */ }
6223 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6224 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6225 else
6226 {
6227 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6228 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6229 return rcStrict;
6230 }
6231 }
6232 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6233 {
6234 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6235 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6236 {
6237 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6238 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6239 }
6240 else
6241 {
6242 LogEx(LOG_GROUP_IEM,
6243 ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6244 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6245 return rcStrict2;
6246 }
6247 }
6248 else
6249 {
6250 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6251 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6252 return rcStrict;
6253 }
6254 }
6255 else
6256 {
6257 /*
6258 * No informational status codes here, much more straightforward.
6259 */
6260 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6261 if (RT_SUCCESS(rc))
6262 {
6263 Assert(rc == VINF_SUCCESS);
6264 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6265 if (RT_SUCCESS(rc))
6266 Assert(rc == VINF_SUCCESS);
6267 else
6268 {
6269 LogEx(LOG_GROUP_IEM,
6270 ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6271 return rc;
6272 }
6273 }
6274 else
6275 {
6276 LogEx(LOG_GROUP_IEM,
6277 ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6278 return rc;
6279 }
6280 }
6281 }
6282#ifdef VBOX_STRICT
6283 else
6284 memset(pbBuf, 0xcc, cbMem);
6285 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6286 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6287#endif
6288 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6289
6290 /*
6291 * Commit the bounce buffer entry.
6292 */
6293 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6294 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6295 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6296 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6297 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6298 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6299 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6300 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6301 pVCpu->iem.s.cActiveMappings++;
6302
6303 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6304 *ppvMem = pbBuf;
6305 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6306 return VINF_SUCCESS;
6307}
6308
6309
6310/**
6311 * iemMemMap worker that deals with iemMemPageMap failures.
6312 */
6313static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6314 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6315{
6316 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6317
6318 /*
6319 * Filter out conditions we can handle and the ones which shouldn't happen.
6320 */
6321 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6322 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6323 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6324 {
6325 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6326 return rcMap;
6327 }
6328 pVCpu->iem.s.cPotentialExits++;
6329
6330 /*
6331 * Read in the current memory content if it's a read, execute or partial
6332 * write access.
6333 */
6334 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6335 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6336 {
6337 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6338 memset(pbBuf, 0xff, cbMem);
6339 else
6340 {
6341 int rc;
6342 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6343 {
6344 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6345 if (rcStrict == VINF_SUCCESS)
6346 { /* nothing */ }
6347 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6348 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6349 else
6350 {
6351 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6352 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6353 return rcStrict;
6354 }
6355 }
6356 else
6357 {
6358 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6359 if (RT_SUCCESS(rc))
6360 { /* likely */ }
6361 else
6362 {
6363 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6364 GCPhysFirst, rc));
6365 return rc;
6366 }
6367 }
6368 }
6369 }
6370#ifdef VBOX_STRICT
6371 else
6372 memset(pbBuf, 0xcc, cbMem);
6373#endif
6374#ifdef VBOX_STRICT
6375 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6376 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6377#endif
6378
6379 /*
6380 * Commit the bounce buffer entry.
6381 */
6382 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6383 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6384 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6385 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6386 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6387 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6388 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6389 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6390 pVCpu->iem.s.cActiveMappings++;
6391
6392 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6393 *ppvMem = pbBuf;
6394 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6395 return VINF_SUCCESS;
6396}
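/*
 * Editorial note (hedged): for VERR_PGM_PHYS_TLB_UNASSIGNED the buffer is
 * pre-filled with 0xff on read-style accesses and fUnassigned is recorded in
 * the mapping, so iemMemBounceBufferCommitAndUnmap skips the physical write
 * entirely (see its !fUnassigned check).  That mimics unassigned physical
 * memory: reads return all-ones, writes are dropped.
 */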
6397
6398
6399
6400/**
6401 * Maps the specified guest memory for the given kind of access.
6402 *
6403 * This may be using bounce buffering of the memory if it's crossing a page
6404 * boundary or if there is an access handler installed for any of it. Because
6405 * of lock prefix guarantees, we're in for some extra clutter when this
6406 * happens.
6407 *
6408 * This may raise a \#GP, \#SS, \#PF or \#AC.
6409 *
6410 * @returns VBox strict status code.
6411 *
6412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6413 * @param ppvMem Where to return the pointer to the mapped memory.
6414 * @param pbUnmapInfo Where to return unmap info to be passed to
6415 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6416 * done.
6417 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6418 * 8, 12, 16, 32 or 512. When used by string operations
6419 * it can be up to a page.
6420 * @param iSegReg The index of the segment register to use for this
6421 * access. The base and limits are checked. Use UINT8_MAX
6422 * to indicate that no segmentation is required (for IDT,
6423 * GDT and LDT accesses).
6424 * @param GCPtrMem The address of the guest memory.
6425 * @param fAccess How the memory is being accessed. The
6426 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6427 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6428 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6429 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6430 * set.
6431 * @param uAlignCtl Alignment control:
6432 * - Bits 15:0 is the alignment mask.
6433 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6434 * IEM_MEMMAP_F_ALIGN_SSE, and
6435 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6436 * Pass zero to skip alignment.
6437 */
6438VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6439 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6440{
6441 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6442
6443 /*
6444 * Check the input and figure out which mapping entry to use.
6445 */
6446 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6447 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6448 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6449 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6450 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6451
6452 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6453 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6454 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6455 {
6456 iMemMap = iemMemMapFindFree(pVCpu);
6457 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6458 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6459 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6460 pVCpu->iem.s.aMemMappings[2].fAccess),
6461 VERR_IEM_IPE_9);
6462 }
6463
6464 /*
6465 * Map the memory, checking that we can actually access it. If something
6466 * slightly complicated happens, fall back on bounce buffering.
6467 */
6468 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6469 if (rcStrict == VINF_SUCCESS)
6470 { /* likely */ }
6471 else
6472 return rcStrict;
6473
6474 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6475 { /* likely */ }
6476 else
6477 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6478
6479 /*
6480 * Alignment check.
6481 */
6482 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6483 { /* likelyish */ }
6484 else
6485 {
6486 /* Misaligned access. */
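        /* Editorial summary of the decision below: without IEM_MEMMAP_F_ALIGN_GP
           (or with IEM_MEMMAP_F_ALIGN_SSE and MXCSR.MM set) the misalignment can
           only yield #AC, and only when iemMemAreAlignmentChecksEnabled() says
           so; otherwise the access simply proceeds.  With IEM_MEMMAP_F_ALIGN_GP
           the result is #GP(0), unless IEM_MEMMAP_F_ALIGN_GP_OR_AC is also set,
           the low two address bits are non-zero and alignment checks are
           enabled, in which case #AC is raised instead. */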
6487 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6488 {
6489 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6490 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6491 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6492 {
6493 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6494
6495 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6496 { /* likely */ }
6497 else
6498 return iemRaiseAlignmentCheckException(pVCpu);
6499 }
6500 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6501 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6502 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6503 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6504 * that's what FXSAVE does on a 10980xe. */
6505 && iemMemAreAlignmentChecksEnabled(pVCpu))
6506 return iemRaiseAlignmentCheckException(pVCpu);
6507 else
6508 return iemRaiseGeneralProtectionFault0(pVCpu);
6509 }
6510
6511#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6512 /* If the access is atomic there are host platform alignment restrictions
6513 we need to conform with. */
6514 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6515# if defined(RT_ARCH_AMD64)
6516 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6517# elif defined(RT_ARCH_ARM64)
6518 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6519# else
6520# error port me
6521# endif
6522 )
6523 { /* okay */ }
6524 else
6525 {
6526 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6527 pVCpu->iem.s.cMisalignedAtomics += 1;
6528 return VINF_EM_EMULATE_SPLIT_LOCK;
6529 }
6530#endif
6531 }
6532
6533#ifdef IEM_WITH_DATA_TLB
6534 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6535
6536 /*
6537 * Get the TLB entry for this page and check PT flags.
6538 *
6539 * We reload the TLB entry if we need to set the dirty bit (accessed
6540 * should in theory always be set).
6541 */
6542 uint8_t *pbMem = NULL;
6543 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6544 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6545 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
6546 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6547 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6548 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6549 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6550 {
6551# ifdef IEM_WITH_TLB_STATISTICS
6552 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
6553# endif
6554
6555 /* If the page is either supervisor only or non-writable, we need to do
6556 more careful access checks. */
6557 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6558 {
6559 /* Write to read only memory? */
6560 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6561 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6562 && ( ( IEM_GET_CPL(pVCpu) == 3
6563 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6564 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6565 {
6566 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6567 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6568 }
6569
6570 /* Kernel memory accessed by userland? */
6571 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6572 && IEM_GET_CPL(pVCpu) == 3
6573 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6574 {
6575 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6576 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6577 }
6578 }
6579
6580 /* Look up the physical page info if necessary. */
6581 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6582# ifdef IN_RING3
6583 pbMem = pTlbe->pbMappingR3;
6584# else
6585 pbMem = NULL;
6586# endif
6587 else
6588 {
6589 if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6590 { /* likely */ }
6591 else
6592 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6593 pTlbe->pbMappingR3 = NULL;
6594 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6595 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6596 &pbMem, &pTlbe->fFlagsAndPhysRev);
6597 AssertRCReturn(rc, rc);
6598# ifdef IN_RING3
6599 pTlbe->pbMappingR3 = pbMem;
6600# endif
6601 }
6602 }
6603 else
6604 {
6605 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
6606
6607 /* This page table walking will set A bits as required by the access while performing the walk.
6608 ASSUMES these are set when the address is translated rather than on commit... */
6609 /** @todo testcase: check when A bits are actually set by the CPU for code. */
6610 PGMPTWALKFAST WalkFast;
6611 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6612 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6613 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6614 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6615 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6616 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6617 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6618 fQPage |= PGMQPAGE_F_USER_MODE;
6619 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6620 if (RT_SUCCESS(rc))
6621 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6622 else
6623 {
6624 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6625# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6626 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6627 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6628# endif
6629 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6630 }
6631
6632 uint32_t fDataBps;
6633 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
6634 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
6635 {
6636 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
6637 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
6638 {
6639 pTlbe--;
6640 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
6641 }
6642 else
6643 {
6644 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
6645 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
6646 }
6647 }
6648 else
6649 {
6650 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
6651 to the page with the data access breakpoint armed on it to pass thru here. */
6652 if (fDataBps > 1)
6653 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6654 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6655 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6656 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
6657 pTlbe->uTag = uTagNoRev;
6658 }
6659 pTlbe->fFlagsAndPhysRev = ~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6660 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6661 pTlbe->GCPhys = GCPhysPg;
6662 pTlbe->pbMappingR3 = NULL;
6663 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6664 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6665 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6666 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6667 || IEM_GET_CPL(pVCpu) != 3
6668 || (fAccess & IEM_ACCESS_WHAT_SYS));
6669
6670 /* Resolve the physical address. */
6671 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6672 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6673 &pbMem, &pTlbe->fFlagsAndPhysRev);
6674 AssertRCReturn(rc, rc);
6675# ifdef IN_RING3
6676 pTlbe->pbMappingR3 = pbMem;
6677# endif
6678 }
6679
6680 /*
6681 * Check the physical page level access and mapping.
6682 */
6683 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6684 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6685 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6686 { /* probably likely */ }
6687 else
6688 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6689 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6690 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6691 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6692 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6693 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6694
6695 if (pbMem)
6696 {
6697 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6698 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6699 fAccess |= IEM_ACCESS_NOT_LOCKED;
6700 }
6701 else
6702 {
6703 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6704 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6705 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6706 if (rcStrict != VINF_SUCCESS)
6707 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6708 }
6709
6710 void * const pvMem = pbMem;
6711
6712 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6713 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6714 if (fAccess & IEM_ACCESS_TYPE_READ)
6715 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6716
6717#else /* !IEM_WITH_DATA_TLB */
6718
6719 RTGCPHYS GCPhysFirst;
6720 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6721 if (rcStrict != VINF_SUCCESS)
6722 return rcStrict;
6723
6724 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6725 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6726 if (fAccess & IEM_ACCESS_TYPE_READ)
6727 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6728
6729 void *pvMem;
6730 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6731 if (rcStrict != VINF_SUCCESS)
6732 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6733
6734#endif /* !IEM_WITH_DATA_TLB */
6735
6736 /*
6737 * Fill in the mapping table entry.
6738 */
6739 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6740 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6741 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6742 pVCpu->iem.s.cActiveMappings += 1;
6743
6744 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6745 *ppvMem = pvMem;
6746 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6747 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6748 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6749
6750 return VINF_SUCCESS;
6751}
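/*
 * Illustrative usage sketch (editorial; the local names and the 32-bit read
 * are made up for the example, the calls themselves are the ones defined in
 * this file).  A typical caller pairs iemMemMap with iemMemCommitAndUnmap and
 * passes the natural alignment mask in the low 16 bits of uAlignCtl:
 *
 *     uint32_t    *pu32Src;
 *     uint8_t      bUnmapInfo;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(uint32_t),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint32_t const uValue = *pu32Src;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *         ... use uValue if rcStrict is still VINF_SUCCESS ...
 *     }
 */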
6752
6753
6754/**
6755 * Commits the guest memory if bounce buffered and unmaps it.
6756 *
6757 * @returns Strict VBox status code.
6758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6759 * @param bUnmapInfo Unmap info set by iemMemMap.
6760 */
6761VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6762{
6763 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6764 AssertMsgReturn( (bUnmapInfo & 0x08)
6765 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6766 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6767 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6768 VERR_NOT_FOUND);
6769
6770 /* If it's bounce buffered, we may need to write back the buffer. */
6771 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6772 {
6773 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6774 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6775 }
6776 /* Otherwise unlock it. */
6777 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6778 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6779
6780 /* Free the entry. */
6781 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6782 Assert(pVCpu->iem.s.cActiveMappings != 0);
6783 pVCpu->iem.s.cActiveMappings--;
6784 return VINF_SUCCESS;
6785}
6786
6787
6788/**
6789 * Rolls back the guest memory (conceptually only) and unmaps it.
6790 *
6791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6792 * @param bUnmapInfo Unmap info set by iemMemMap.
6793 */
6794void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6795{
6796 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6797 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6798 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6799 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6800 == ((unsigned)bUnmapInfo >> 4),
6801 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6802
6803 /* Unlock it if necessary. */
6804 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6805 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6806
6807 /* Free the entry. */
6808 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6809 Assert(pVCpu->iem.s.cActiveMappings != 0);
6810 pVCpu->iem.s.cActiveMappings--;
6811}
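/*
 * Editorial note on the bUnmapInfo byte used above (derived from the code in
 * this file): bits 2:0 hold the mapping index, bit 3 is always set as a
 * validity marker, and bits 7:4 hold the IEM_ACCESS_TYPE_MASK portion of
 * fAccess, i.e. it is produced as
 *
 *     bUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
 *
 * and decoded again with (bUnmapInfo & 0x7) and ((unsigned)bUnmapInfo >> 4).
 */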
6812
6813#ifdef IEM_WITH_SETJMP
6814
6815/**
6816 * Maps the specified guest memory for the given kind of access, longjmp on
6817 * error.
6818 *
6819 * This may be using bounce buffering of the memory if it's crossing a page
6820 * boundary or if there is an access handler installed for any of it. Because
6821 * of lock prefix guarantees, we're in for some extra clutter when this
6822 * happens.
6823 *
6824 * This may raise a \#GP, \#SS, \#PF or \#AC.
6825 *
6826 * @returns Pointer to the mapped memory.
6827 *
6828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6829 * @param bUnmapInfo Where to return unmap info to be passed to
6830 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6831 * iemMemCommitAndUnmapWoSafeJmp,
6832 * iemMemCommitAndUnmapRoSafeJmp,
6833 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6834 * when done.
6835 * @param cbMem The number of bytes to map. This is usually 1,
6836 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6837 * string operations it can be up to a page.
6838 * @param iSegReg The index of the segment register to use for
6839 * this access. The base and limits are checked.
6840 * Use UINT8_MAX to indicate that no segmentation
6841 * is required (for IDT, GDT and LDT accesses).
6842 * @param GCPtrMem The address of the guest memory.
6843 * @param fAccess How the memory is being accessed. The
6844 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6845 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6846 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6847 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6848 * set.
6849 * @param uAlignCtl Alignment control:
6850 * - Bits 15:0 is the alignment mask.
6851 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6852 * IEM_MEMMAP_F_ALIGN_SSE, and
6853 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6854 * Pass zero to skip alignment.
6855 * @tparam a_fSafeCall Whether this is a call from a "safe" fallback function in
6856 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
6857 * needs counting as such in the statistics.
6858 */
6859template<bool a_fSafeCall = false>
6860static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6861 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6862{
6863 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
6864
6865 /*
6866 * Check the input, check segment access and adjust address
6867 * with segment base.
6868 */
6869 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6870 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6871 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6872
6873 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6874 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6875 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6876
6877 /*
6878 * Alignment check.
6879 */
6880 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6881 { /* likelyish */ }
6882 else
6883 {
6884 /* Misaligned access. */
6885 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6886 {
6887 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6888 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6889 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6890 {
6891 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6892
6893 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6894 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6895 }
6896 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6897 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6898 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6899 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6900 * that's what FXSAVE does on a 10980xe. */
6901 && iemMemAreAlignmentChecksEnabled(pVCpu))
6902 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6903 else
6904 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6905 }
6906
6907#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6908 /* If the access is atomic there are host platform alignment restrictions
6909 we need to conform with. */
6910 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6911# if defined(RT_ARCH_AMD64)
6912 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6913# elif defined(RT_ARCH_ARM64)
6914 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6915# else
6916# error port me
6917# endif
6918 )
6919 { /* okay */ }
6920 else
6921 {
6922 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6923 pVCpu->iem.s.cMisalignedAtomics += 1;
6924 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
6925 }
6926#endif
6927 }
6928
6929 /*
6930 * Figure out which mapping entry to use.
6931 */
6932 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6933 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6934 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6935 {
6936 iMemMap = iemMemMapFindFree(pVCpu);
6937 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6938 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6939 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6940 pVCpu->iem.s.aMemMappings[2].fAccess),
6941 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6942 }
6943
6944 /*
6945 * Crossing a page boundary?
6946 */
6947 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6948 { /* No (likely). */ }
6949 else
6950 {
6951 void *pvMem;
6952 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6953 if (rcStrict == VINF_SUCCESS)
6954 return pvMem;
6955 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6956 }
6957
6958#ifdef IEM_WITH_DATA_TLB
6959 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6960
6961 /*
6962 * Get the TLB entry for this page checking that it has the A & D bits
6963 * set as per fAccess flags.
6964 */
6965 /** @todo make the caller pass these in with fAccess. */
6966 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6967 ? IEMTLBE_F_PT_NO_USER : 0;
6968 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6969 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6970 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6971 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6972 ? IEMTLBE_F_PT_NO_WRITE : 0)
6973 : 0;
6974 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6975 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6976 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6977 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
6978 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6979 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6980 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6981 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6982 {
6983# ifdef IEM_WITH_TLB_STATISTICS
6984 if (a_fSafeCall)
6985 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
6986 else
6987 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
6988# endif
6989 }
6990 else
6991 {
6992 if (a_fSafeCall)
6993 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
6994 else
6995 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
6996
6997 /* This page table walking will set A and D bits as required by the
6998 access while performing the walk.
6999 ASSUMES these are set when the address is translated rather than on commit... */
7000 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7001 PGMPTWALKFAST WalkFast;
7002 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7003 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7004 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7005 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7006 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7007 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7008 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7009 fQPage |= PGMQPAGE_F_USER_MODE;
7010 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7011 if (RT_SUCCESS(rc))
7012 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7013 else
7014 {
7015 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7016# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7017 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7018 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7019# endif
7020 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7021 }
7022
7023 uint32_t fDataBps;
7024 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7025 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7026 {
7027 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7028 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7029 {
7030 pTlbe--;
7031 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7032 }
7033 else
7034 {
7035 if (a_fSafeCall)
7036 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7037 else
7038 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7039 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7040 }
7041 }
7042 else
7043 {
7044 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7045 to the page with the data access breakpoint armed on it to pass thru here. */
7046 if (fDataBps > 1)
7047 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7048 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7049 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7050 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7051 pTlbe->uTag = uTagNoRev;
7052 }
7053 pTlbe->fFlagsAndPhysRev = ~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
7054 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7055 pTlbe->GCPhys = GCPhysPg;
7056 pTlbe->pbMappingR3 = NULL;
7057 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7058 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
7059 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7060
7061 /* Resolve the physical address. */
7062 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7063 uint8_t *pbMemFullLoad = NULL;
7064 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7065 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7066 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7067# ifdef IN_RING3
7068 pTlbe->pbMappingR3 = pbMemFullLoad;
7069# endif
7070 }
7071
7072 /*
7073 * Check the flags and physical revision.
7074 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7075 * just to keep the code structure simple (i.e. avoid gotos or similar).
7076 */
7077 uint8_t *pbMem;
7078 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7079 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7080# ifdef IN_RING3
7081 pbMem = pTlbe->pbMappingR3;
7082# else
7083 pbMem = NULL;
7084# endif
7085 else
7086 {
7087 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7088
7089 /*
7090 * Okay, something isn't quite right or needs refreshing.
7091 */
7092 /* Write to read only memory? */
7093 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7094 {
7095 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7096# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7097/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
7098 * to trigger an \#PF or a VM nested paging exit here yet! */
7099 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7100 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7101# endif
7102 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7103 }
7104
7105 /* Kernel memory accessed by userland? */
7106 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7107 {
7108 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7109# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7110/** @todo TLB: See above. */
7111 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7112 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7113# endif
7114 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7115 }
7116
7117 /*
7118 * Check if the physical page info needs updating.
7119 */
7120 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7121# ifdef IN_RING3
7122 pbMem = pTlbe->pbMappingR3;
7123# else
7124 pbMem = NULL;
7125# endif
7126 else
7127 {
7128 pTlbe->pbMappingR3 = NULL;
7129 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7130 pbMem = NULL;
7131 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7132 &pbMem, &pTlbe->fFlagsAndPhysRev);
7133 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7134# ifdef IN_RING3
7135 pTlbe->pbMappingR3 = pbMem;
7136# endif
7137 }
7138
7139 /*
7140 * Check the physical page level access and mapping.
7141 */
7142 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7143 { /* probably likely */ }
7144 else
7145 {
7146 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7147 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7148 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7149 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7150 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7151 if (rcStrict == VINF_SUCCESS)
7152 return pbMem;
7153 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7154 }
7155 }
7156 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7157
7158 if (pbMem)
7159 {
7160 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7161 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7162 fAccess |= IEM_ACCESS_NOT_LOCKED;
7163 }
7164 else
7165 {
7166 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7167 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7168 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7169 if (rcStrict == VINF_SUCCESS)
7170 {
7171 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7172 return pbMem;
7173 }
7174 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7175 }
7176
7177 void * const pvMem = pbMem;
7178
7179 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7180 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7181 if (fAccess & IEM_ACCESS_TYPE_READ)
7182 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7183
7184#else /* !IEM_WITH_DATA_TLB */
7185
7186
7187 RTGCPHYS GCPhysFirst;
7188 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7189 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7190 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7191
7192 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7193 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7194 if (fAccess & IEM_ACCESS_TYPE_READ)
7195 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7196
7197 void *pvMem;
7198 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7199 if (rcStrict == VINF_SUCCESS)
7200 { /* likely */ }
7201 else
7202 {
7203 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7204 if (rcStrict == VINF_SUCCESS)
7205 return pvMem;
7206 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7207 }
7208
7209#endif /* !IEM_WITH_DATA_TLB */
7210
7211 /*
7212 * Fill in the mapping table entry.
7213 */
7214 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7215 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7216 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7217 pVCpu->iem.s.cActiveMappings++;
7218
7219 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7220
7221 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7222 return pvMem;
7223}
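/*
 * Illustrative usage sketch (editorial; names are made up and the access
 * constant is assumed to be the usual IEM_ACCESS_DATA_W).  The longjmp variant
 * returns the mapping directly and reports failures via IEM_DO_LONGJMP, so a
 * caller running under the IEM setjmp frame can simply do:
 *
 *     uint8_t   bUnmapInfo;
 *     uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_ES,
 *                                                  GCPtrMem, IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1);
 *     *pu16Dst = u16Value;
 *     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 */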
7224
7225
7226/** @see iemMemMapJmp */
7227static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7228 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7229{
7230 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7231}
7232
7233
7234/**
7235 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7236 *
7237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7238 * @param pvMem The mapping.
7239 * @param fAccess The kind of access.
7240 */
7241void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7242{
7243 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7244 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7245 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7246 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7247 == ((unsigned)bUnmapInfo >> 4),
7248 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7249
7250 /* If it's bounce buffered, we may need to write back the buffer. */
7251 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7252 {
7253 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7254 {
7255 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7256 if (rcStrict == VINF_SUCCESS)
7257 return;
7258 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7259 }
7260 }
7261 /* Otherwise unlock it. */
7262 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7263 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7264
7265 /* Free the entry. */
7266 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7267 Assert(pVCpu->iem.s.cActiveMappings != 0);
7268 pVCpu->iem.s.cActiveMappings--;
7269}
7270
7271
7272/** Fallback for iemMemCommitAndUnmapRwJmp. */
7273void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7274{
7275 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7276 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7277}
7278
7279
7280/** Fallback for iemMemCommitAndUnmapAtJmp. */
7281void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7282{
7283 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7284 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7285}
7286
7287
7288/** Fallback for iemMemCommitAndUnmapWoJmp. */
7289void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7290{
7291 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7292 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7293}
7294
7295
7296/** Fallback for iemMemCommitAndUnmapRoJmp. */
7297void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7298{
7299 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7300 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7301}
7302
7303
7304/** Fallback for iemMemRollbackAndUnmapWo. */
7305void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7306{
7307 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7308 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7309}
7310
7311#endif /* IEM_WITH_SETJMP */
7312
7313#ifndef IN_RING3
7314/**
7315 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7316 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
7317 *
7318 * Allows the instruction to be completed and retired, while the IEM user will
7319 * return to ring-3 immediately afterwards and do the postponed writes there.
7320 *
7321 * @returns VBox status code (no strict statuses). Caller must check
7322 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7324 * @param   bUnmapInfo  Unmap info returned by iemMemMap.
7326 */
7327VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7328{
7329 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7330 AssertMsgReturn( (bUnmapInfo & 0x08)
7331 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7332 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7333 == ((unsigned)bUnmapInfo >> 4),
7334 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7335 VERR_NOT_FOUND);
7336
7337 /* If it's bounce buffered, we may need to write back the buffer. */
7338 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7339 {
7340 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7341 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7342 }
7343 /* Otherwise unlock it. */
7344 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7345 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7346
7347 /* Free the entry. */
7348 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7349 Assert(pVCpu->iem.s.cActiveMappings != 0);
7350 pVCpu->iem.s.cActiveMappings--;
7351 return VINF_SUCCESS;
7352}
7353#endif
7354
7355
7356/**
7357 * Rolls back mappings, releasing page locks and such.
7358 *
7359 * The caller shall only call this after checking cActiveMappings.
7360 *
7361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7362 */
7363void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7364{
7365 Assert(pVCpu->iem.s.cActiveMappings > 0);
7366
7367 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7368 while (iMemMap-- > 0)
7369 {
7370 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7371 if (fAccess != IEM_ACCESS_INVALID)
7372 {
7373 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7374 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7375 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7376 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7377 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7378 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7379 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7381 pVCpu->iem.s.cActiveMappings--;
7382 }
7383 }
7384}
7385
7386
7387/*
7388 * Instantiate R/W templates.
7389 */
7390#define TMPL_MEM_WITH_STACK
7391
7392#define TMPL_MEM_TYPE uint8_t
7393#define TMPL_MEM_FN_SUFF U8
7394#define TMPL_MEM_FMT_TYPE "%#04x"
7395#define TMPL_MEM_FMT_DESC "byte"
7396#include "IEMAllMemRWTmpl.cpp.h"
7397
7398#define TMPL_MEM_TYPE uint16_t
7399#define TMPL_MEM_FN_SUFF U16
7400#define TMPL_MEM_FMT_TYPE "%#06x"
7401#define TMPL_MEM_FMT_DESC "word"
7402#include "IEMAllMemRWTmpl.cpp.h"
7403
7404#define TMPL_WITH_PUSH_SREG
7405#define TMPL_MEM_TYPE uint32_t
7406#define TMPL_MEM_FN_SUFF U32
7407#define TMPL_MEM_FMT_TYPE "%#010x"
7408#define TMPL_MEM_FMT_DESC "dword"
7409#include "IEMAllMemRWTmpl.cpp.h"
7410#undef TMPL_WITH_PUSH_SREG
7411
7412#define TMPL_MEM_TYPE uint64_t
7413#define TMPL_MEM_FN_SUFF U64
7414#define TMPL_MEM_FMT_TYPE "%#018RX64"
7415#define TMPL_MEM_FMT_DESC "qword"
7416#include "IEMAllMemRWTmpl.cpp.h"
7417
7418#undef TMPL_MEM_WITH_STACK
7419
7420#define TMPL_MEM_TYPE uint64_t
7421#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7422#define TMPL_MEM_FN_SUFF U64AlignedU128
7423#define TMPL_MEM_FMT_TYPE "%#018RX64"
7424#define TMPL_MEM_FMT_DESC "qword"
7425#include "IEMAllMemRWTmpl.cpp.h"
7426
7427/* See IEMAllMemRWTmplInline.cpp.h */
7428#define TMPL_MEM_BY_REF
7429
7430#define TMPL_MEM_TYPE RTFLOAT80U
7431#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7432#define TMPL_MEM_FN_SUFF R80
7433#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7434#define TMPL_MEM_FMT_DESC "tword"
7435#include "IEMAllMemRWTmpl.cpp.h"
7436
7437#define TMPL_MEM_TYPE RTPBCD80U
7438#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7439#define TMPL_MEM_FN_SUFF D80
7440#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7441#define TMPL_MEM_FMT_DESC "tword"
7442#include "IEMAllMemRWTmpl.cpp.h"
7443
7444#define TMPL_MEM_TYPE RTUINT128U
7445#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7446#define TMPL_MEM_FN_SUFF U128
7447#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7448#define TMPL_MEM_FMT_DESC "dqword"
7449#include "IEMAllMemRWTmpl.cpp.h"
7450
7451#define TMPL_MEM_TYPE RTUINT128U
7452#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7453#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7454#define TMPL_MEM_FN_SUFF U128AlignedSse
7455#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7456#define TMPL_MEM_FMT_DESC "dqword"
7457#include "IEMAllMemRWTmpl.cpp.h"
7458
7459#define TMPL_MEM_TYPE RTUINT128U
7460#define TMPL_MEM_TYPE_ALIGN 0
7461#define TMPL_MEM_FN_SUFF U128NoAc
7462#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7463#define TMPL_MEM_FMT_DESC "dqword"
7464#include "IEMAllMemRWTmpl.cpp.h"
7465
7466#define TMPL_MEM_TYPE RTUINT256U
7467#define TMPL_MEM_TYPE_ALIGN 0
7468#define TMPL_MEM_FN_SUFF U256NoAc
7469#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7470#define TMPL_MEM_FMT_DESC "qqword"
7471#include "IEMAllMemRWTmpl.cpp.h"
7472
7473#define TMPL_MEM_TYPE RTUINT256U
7474#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7475#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7476#define TMPL_MEM_FN_SUFF U256AlignedAvx
7477#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7478#define TMPL_MEM_FMT_DESC "qqword"
7479#include "IEMAllMemRWTmpl.cpp.h"
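
/*
 * Illustrative usage sketch (hypothetical target address): each include above
 * instantiates fetch/store helpers for the given TMPL_MEM_TYPE, named after
 * TMPL_MEM_FN_SUFF.  For the U16 instantiation that yields, among others:
 *
 *      uint16_t     u16Value;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU16(pVCpu, &u16Value, X86_SREG_DS, GCPtrMem);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStoreDataU16(pVCpu, X86_SREG_DS, GCPtrMem, u16Value);
 */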
7480
7481/**
7482 * Fetches a data dword and zero extends it to a qword.
7483 *
7484 * @returns Strict VBox status code.
7485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7486 * @param pu64Dst Where to return the qword.
7487 * @param iSegReg The index of the segment register to use for
7488 * this access. The base and limits are checked.
7489 * @param GCPtrMem The address of the guest memory.
7490 */
7491VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7492{
7493 /* The lazy approach for now... */
7494 uint8_t bUnmapInfo;
7495 uint32_t const *pu32Src;
7496 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7497 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7498 if (rc == VINF_SUCCESS)
7499 {
7500 *pu64Dst = *pu32Src;
7501 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7502 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7503 }
7504 return rc;
7505}
7506
7507
7508#ifdef SOME_UNUSED_FUNCTION
7509/**
7510 * Fetches a data dword and sign extends it to a qword.
7511 *
7512 * @returns Strict VBox status code.
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param pu64Dst Where to return the sign extended value.
7515 * @param iSegReg The index of the segment register to use for
7516 * this access. The base and limits are checked.
7517 * @param GCPtrMem The address of the guest memory.
7518 */
7519VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7520{
7521 /* The lazy approach for now... */
7522 uint8_t bUnmapInfo;
7523 int32_t const *pi32Src;
7524 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7525 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7526 if (rc == VINF_SUCCESS)
7527 {
7528 *pu64Dst = *pi32Src;
7529 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7530 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7531 }
7532#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7533 else
7534 *pu64Dst = 0;
7535#endif
7536 return rc;
7537}
7538#endif
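
/*
 * Minimal sketch (plain C semantics, hypothetical value) of the difference
 * between the zero and sign extending dword fetches above:
 *
 *      uint32_t const u32 = UINT32_C(0x80000000);
 *      uint64_t const uZx = u32;                             // 0x0000000080000000 (U32_ZX_U64)
 *      uint64_t const uSx = (uint64_t)(int64_t)(int32_t)u32; // 0xffffffff80000000 (S32SxU64)
 */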
7539
7540
7541/**
7542 * Fetches a descriptor register (lgdt, lidt).
7543 *
7544 * @returns Strict VBox status code.
7545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7546 * @param pcbLimit Where to return the limit.
7547 * @param pGCPtrBase Where to return the base.
7548 * @param iSegReg The index of the segment register to use for
7549 * this access. The base and limits are checked.
7550 * @param GCPtrMem The address of the guest memory.
7551 * @param enmOpSize The effective operand size.
7552 */
7553VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7554 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7555{
7556 /*
7557 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7558 * little special:
7559 * - The two reads are done separately.
7560 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7561 * - We suspect the 386 to actually commit the limit before the base in
7562 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7563 * don't try to emulate this eccentric behavior, because it's not well
7564 * enough understood and rather hard to trigger.
7565 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7566 */
7567 VBOXSTRICTRC rcStrict;
7568 if (IEM_IS_64BIT_CODE(pVCpu))
7569 {
7570 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7571 if (rcStrict == VINF_SUCCESS)
7572 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7573 }
7574 else
7575 {
7576 uint32_t uTmp = 0; /* (silence Visual C++ 'maybe used uninitialized' warning) */
7577 if (enmOpSize == IEMMODE_32BIT)
7578 {
7579 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7580 {
7581 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7582 if (rcStrict == VINF_SUCCESS)
7583 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7584 }
7585 else
7586 {
7587 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7588 if (rcStrict == VINF_SUCCESS)
7589 {
7590 *pcbLimit = (uint16_t)uTmp;
7591 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7592 }
7593 }
7594 if (rcStrict == VINF_SUCCESS)
7595 *pGCPtrBase = uTmp;
7596 }
7597 else
7598 {
7599 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7600 if (rcStrict == VINF_SUCCESS)
7601 {
7602 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7603 if (rcStrict == VINF_SUCCESS)
7604 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7605 }
7606 }
7607 }
7608 return rcStrict;
7609}
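
/*
 * Illustrative example (hypothetical memory content): with a 16-bit operand
 * size the code above keeps only the low 24 bits of the base read at
 * GCPtrMem + 2:
 *
 *      uint32_t const uTmp      = UINT32_C(0xff563412);         // dword read at GCPtrMem + 2
 *      RTGCPTR  const GCPtrBase = uTmp & UINT32_C(0x00ffffff);  // -> 0x00563412
 */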
7610
7611
7612/**
7613 * Stores a data dqword, SSE aligned.
7614 *
7615 * @returns Strict VBox status code.
7616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7617 * @param iSegReg The index of the segment register to use for
7618 * this access. The base and limits are checked.
7619 * @param GCPtrMem The address of the guest memory.
7620 * @param u128Value The value to store.
7621 */
7622VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7623{
7624 /* The lazy approach for now... */
7625 uint8_t bUnmapInfo;
7626 PRTUINT128U pu128Dst;
7627 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7628 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7629 if (rc == VINF_SUCCESS)
7630 {
7631 pu128Dst->au64[0] = u128Value.au64[0];
7632 pu128Dst->au64[1] = u128Value.au64[1];
7633 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7634 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7635 }
7636 return rc;
7637}
7638
7639
7640#ifdef IEM_WITH_SETJMP
7641/**
7642 * Stores a data dqword, SSE aligned.
7643 *
7644 * @returns Strict VBox status code.
7645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7646 * @param iSegReg The index of the segment register to use for
7647 * this access. The base and limits are checked.
7648 * @param GCPtrMem The address of the guest memory.
7649 * @param u128Value The value to store.
7650 */
7651void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7652 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7653{
7654 /* The lazy approach for now... */
7655 uint8_t bUnmapInfo;
7656 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7657 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7658 pu128Dst->au64[0] = u128Value.au64[0];
7659 pu128Dst->au64[1] = u128Value.au64[1];
7660 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7661 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7662}
7663#endif
7664
7665
7666/**
7667 * Stores a data qqword (256 bits).
7668 *
7669 * @returns Strict VBox status code.
7670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7671 * @param iSegReg The index of the segment register to use for
7672 * this access. The base and limits are checked.
7673 * @param GCPtrMem The address of the guest memory.
7674 * @param pu256Value Pointer to the value to store.
7675 */
7676VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7677{
7678 /* The lazy approach for now... */
7679 uint8_t bUnmapInfo;
7680 PRTUINT256U pu256Dst;
7681 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7682 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7683 if (rc == VINF_SUCCESS)
7684 {
7685 pu256Dst->au64[0] = pu256Value->au64[0];
7686 pu256Dst->au64[1] = pu256Value->au64[1];
7687 pu256Dst->au64[2] = pu256Value->au64[2];
7688 pu256Dst->au64[3] = pu256Value->au64[3];
7689 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7690 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7691 }
7692 return rc;
7693}
7694
7695
7696#ifdef IEM_WITH_SETJMP
7697/**
7698 * Stores a data qqword (256 bits), longjmp on error.
7699 *
7700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7701 * @param iSegReg The index of the segment register to use for
7702 * this access. The base and limits are checked.
7703 * @param GCPtrMem The address of the guest memory.
7704 * @param pu256Value Pointer to the value to store.
7705 */
7706void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7707{
7708 /* The lazy approach for now... */
7709 uint8_t bUnmapInfo;
7710 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7711 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7712 pu256Dst->au64[0] = pu256Value->au64[0];
7713 pu256Dst->au64[1] = pu256Value->au64[1];
7714 pu256Dst->au64[2] = pu256Value->au64[2];
7715 pu256Dst->au64[3] = pu256Value->au64[3];
7716 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7717 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7718}
7719#endif
7720
7721
7722/**
7723 * Stores a descriptor register (sgdt, sidt).
7724 *
7725 * @returns Strict VBox status code.
7726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7727 * @param cbLimit The limit.
7728 * @param GCPtrBase The base address.
7729 * @param iSegReg The index of the segment register to use for
7730 * this access. The base and limits are checked.
7731 * @param GCPtrMem The address of the guest memory.
7732 */
7733VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7734{
7735 /*
7736 * The SIDT and SGDT instructions actually store the data using two
7737 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7738 * do not respond to operand-size prefixes.
7739 */
7740 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7741 if (rcStrict == VINF_SUCCESS)
7742 {
7743 if (IEM_IS_16BIT_CODE(pVCpu))
7744 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7745 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7746 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7747 else if (IEM_IS_32BIT_CODE(pVCpu))
7748 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7749 else
7750 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7751 }
7752 return rcStrict;
7753}
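
/*
 * Illustrative example (hypothetical base value): on a 286 target the code
 * above stores 0FFh in the 4th base byte, while 386+ targets store the base
 * as-is:
 *
 *      uint32_t const uStored286 = UINT32_C(0x00563412) | UINT32_C(0xff000000); // -> 0xff563412
 *      uint32_t const uStored386 = UINT32_C(0x00563412);                        // unchanged
 */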
7754
7755
7756/**
7757 * Begin a special stack push (used by interrupts, exceptions and such).
7758 *
7759 * This will raise \#SS or \#PF if appropriate.
7760 *
7761 * @returns Strict VBox status code.
7762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7763 * @param cbMem The number of bytes to push onto the stack.
7764 * @param cbAlign The alignment mask (7, 3, 1).
7765 * @param ppvMem Where to return the pointer to the stack memory.
7766 * As with the other memory functions this could be
7767 * direct access or bounce buffered access, so
7768 * don't commit register changes until the commit call
7769 * succeeds.
7770 * @param pbUnmapInfo Where to store unmap info for
7771 * iemMemStackPushCommitSpecial.
7772 * @param puNewRsp Where to return the new RSP value. This must be
7773 * passed unchanged to
7774 * iemMemStackPushCommitSpecial().
7775 */
7776VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7777 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7778{
7779 Assert(cbMem < UINT8_MAX);
7780 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7781 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7782}
7783
7784
7785/**
7786 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7787 *
7788 * This will update the rSP.
7789 *
7790 * @returns Strict VBox status code.
7791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7792 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7793 * @param uNewRsp The new RSP value returned by
7794 * iemMemStackPushBeginSpecial().
7795 */
7796VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7797{
7798 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7799 if (rcStrict == VINF_SUCCESS)
7800 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7801 return rcStrict;
7802}
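
/*
 * Minimal usage sketch (illustrative, error handling shortened, uValue is a
 * hypothetical local): pushing a qword via the special stack helpers.  The
 * data is written between begin and commit, and RSP is only updated by the
 * commit call:
 *
 *      void    *pvMem      = NULL;
 *      uint8_t  bUnmapInfo = 0;
 *      uint64_t uNewRsp    = 0;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvMem, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *(uint64_t *)pvMem = uValue;
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
 *      }
 */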
7803
7804
7805/**
7806 * Begin a special stack pop (used by iret, retf and such).
7807 *
7808 * This will raise \#SS or \#PF if appropriate.
7809 *
7810 * @returns Strict VBox status code.
7811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7812 * @param cbMem The number of bytes to pop from the stack.
7813 * @param cbAlign The alignment mask (7, 3, 1).
7814 * @param ppvMem Where to return the pointer to the stack memory.
7815 * @param pbUnmapInfo Where to store unmap info for
7816 * iemMemStackPopDoneSpecial.
7817 * @param puNewRsp Where to return the new RSP value. This must be
7818 * assigned to CPUMCTX::rsp manually some time
7819 * after iemMemStackPopDoneSpecial() has been
7820 * called.
7821 */
7822VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7823 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7824{
7825 Assert(cbMem < UINT8_MAX);
7826 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7827 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7828}
7829
7830
7831/**
7832 * Continue a special stack pop (used by iret and retf), for the purpose of
7833 * retrieving a new stack pointer.
7834 *
7835 * This will raise \#SS or \#PF if appropriate.
7836 *
7837 * @returns Strict VBox status code.
7838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7839 * @param off Offset from the top of the stack. This is zero
7840 * except in the retf case.
7841 * @param cbMem The number of bytes to pop from the stack.
7842 * @param ppvMem Where to return the pointer to the stack memory.
7843 * @param pbUnmapInfo Where to store unmap info for
7844 * iemMemStackPopDoneSpecial.
7845 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7846 * return this because all use of this function is
7847 * to retrieve a new value and anything we return
7848 * here would be discarded.)
7849 */
7850VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7851 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7852{
7853 Assert(cbMem < UINT8_MAX);
7854
7855 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7856 RTGCPTR GCPtrTop;
7857 if (IEM_IS_64BIT_CODE(pVCpu))
7858 GCPtrTop = uCurNewRsp;
7859 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7860 GCPtrTop = (uint32_t)uCurNewRsp;
7861 else
7862 GCPtrTop = (uint16_t)uCurNewRsp;
7863
7864 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7865 0 /* checked in iemMemStackPopBeginSpecial */);
7866}
7867
7868
7869/**
7870 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7871 * iemMemStackPopContinueSpecial).
7872 *
7873 * The caller will manually commit the rSP.
7874 *
7875 * @returns Strict VBox status code.
7876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7877 * @param bUnmapInfo Unmap information returned by
7878 * iemMemStackPopBeginSpecial() or
7879 * iemMemStackPopContinueSpecial().
7880 */
7881VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7882{
7883 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7884}
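
/*
 * Corresponding pop sketch (illustrative, error handling shortened): the data
 * is read between begin and done, and the caller assigns the new RSP manually
 * afterwards as documented above:
 *
 *      void const *pvMem      = NULL;
 *      uint8_t     bUnmapInfo = 0;
 *      uint64_t    uNewRsp    = 0;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvMem, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uValue = *(uint64_t const *)pvMem;   // consume before unmapping
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;
 *      }
 */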
7885
7886
7887/**
7888 * Fetches a system table byte.
7889 *
7890 * @returns Strict VBox status code.
7891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7892 * @param pbDst Where to return the byte.
7893 * @param iSegReg The index of the segment register to use for
7894 * this access. The base and limits are checked.
7895 * @param GCPtrMem The address of the guest memory.
7896 */
7897VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7898{
7899 /* The lazy approach for now... */
7900 uint8_t bUnmapInfo;
7901 uint8_t const *pbSrc;
7902 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7903 if (rc == VINF_SUCCESS)
7904 {
7905 *pbDst = *pbSrc;
7906 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7907 }
7908 return rc;
7909}
7910
7911
7912/**
7913 * Fetches a system table word.
7914 *
7915 * @returns Strict VBox status code.
7916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7917 * @param pu16Dst Where to return the word.
7918 * @param iSegReg The index of the segment register to use for
7919 * this access. The base and limits are checked.
7920 * @param GCPtrMem The address of the guest memory.
7921 */
7922VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7923{
7924 /* The lazy approach for now... */
7925 uint8_t bUnmapInfo;
7926 uint16_t const *pu16Src;
7927 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7928 if (rc == VINF_SUCCESS)
7929 {
7930 *pu16Dst = *pu16Src;
7931 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7932 }
7933 return rc;
7934}
7935
7936
7937/**
7938 * Fetches a system table dword.
7939 *
7940 * @returns Strict VBox status code.
7941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7942 * @param pu32Dst Where to return the dword.
7943 * @param iSegReg The index of the segment register to use for
7944 * this access. The base and limits are checked.
7945 * @param GCPtrMem The address of the guest memory.
7946 */
7947VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7948{
7949 /* The lazy approach for now... */
7950 uint8_t bUnmapInfo;
7951 uint32_t const *pu32Src;
7952 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7953 if (rc == VINF_SUCCESS)
7954 {
7955 *pu32Dst = *pu32Src;
7956 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7957 }
7958 return rc;
7959}
7960
7961
7962/**
7963 * Fetches a system table qword.
7964 *
7965 * @returns Strict VBox status code.
7966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7967 * @param pu64Dst Where to return the qword.
7968 * @param iSegReg The index of the segment register to use for
7969 * this access. The base and limits are checked.
7970 * @param GCPtrMem The address of the guest memory.
7971 */
7972VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7973{
7974 /* The lazy approach for now... */
7975 uint8_t bUnmapInfo;
7976 uint64_t const *pu64Src;
7977 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7978 if (rc == VINF_SUCCESS)
7979 {
7980 *pu64Dst = *pu64Src;
7981 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7982 }
7983 return rc;
7984}
7985
7986
7987/**
7988 * Fetches a descriptor table entry with caller specified error code.
7989 *
7990 * @returns Strict VBox status code.
7991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7992 * @param pDesc Where to return the descriptor table entry.
7993 * @param uSel The selector which table entry to fetch.
7994 * @param uXcpt The exception to raise on table lookup error.
7995 * @param uErrorCode The error code associated with the exception.
7996 */
7997static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7998 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7999{
8000 AssertPtr(pDesc);
8001 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8002
8003 /** @todo did the 286 require all 8 bytes to be accessible? */
8004 /*
8005 * Get the selector table base and check bounds.
8006 */
8007 RTGCPTR GCPtrBase;
8008 if (uSel & X86_SEL_LDT)
8009 {
8010 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8011 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8012 {
8013 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8014 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8015 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8016 uErrorCode, 0);
8017 }
8018
8019 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8020 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8021 }
8022 else
8023 {
8024 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8025 {
8026 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8027 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8028 uErrorCode, 0);
8029 }
8030 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8031 }
8032
8033 /*
8034 * Read the legacy descriptor and maybe the long mode extensions if
8035 * required.
8036 */
8037 VBOXSTRICTRC rcStrict;
8038 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8039 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8040 else
8041 {
8042 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8043 if (rcStrict == VINF_SUCCESS)
8044 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8045 if (rcStrict == VINF_SUCCESS)
8046 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8047 if (rcStrict == VINF_SUCCESS)
8048 pDesc->Legacy.au16[3] = 0;
8049 else
8050 return rcStrict;
8051 }
8052
8053 if (rcStrict == VINF_SUCCESS)
8054 {
8055 if ( !IEM_IS_LONG_MODE(pVCpu)
8056 || pDesc->Legacy.Gen.u1DescType)
8057 pDesc->Long.au64[1] = 0;
8058 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8059 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8060 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8061 else
8062 {
8063 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8064 /** @todo is this the right exception? */
8065 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8066 }
8067 }
8068 return rcStrict;
8069}
8070
8071
8072/**
8073 * Fetches a descriptor table entry.
8074 *
8075 * @returns Strict VBox status code.
8076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8077 * @param pDesc Where to return the descriptor table entry.
8078 * @param uSel The selector which table entry to fetch.
8079 * @param uXcpt The exception to raise on table lookup error.
8080 */
8081VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8082{
8083 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8084}
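
/*
 * Illustrative sketch (hypothetical selector value): how a selector maps onto
 * the table lookup above.  Bit 2 (X86_SEL_LDT) selects LDT vs GDT, bits 1:0
 * are the RPL, and the remaining bits index the table in 8-byte steps:
 *
 *      uint16_t const uSel    = 0x002b;                        // hypothetical selector
 *      bool     const fLdt    = RT_BOOL(uSel & X86_SEL_LDT);   // false -> GDT
 *      uint16_t const uRpl    = uSel & X86_SEL_RPL;            // 3
 *      uint16_t const offDesc = uSel & X86_SEL_MASK;           // 0x0028, i.e. GDT entry 5
 */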
8085
8086
8087/**
8088 * Marks the selector descriptor as accessed (only non-system descriptors).
8089 *
8090 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8091 * will therefore skip the limit checks.
8092 *
8093 * @returns Strict VBox status code.
8094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8095 * @param uSel The selector.
8096 */
8097VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8098{
8099 /*
8100 * Get the selector table base and calculate the entry address.
8101 */
8102 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8103 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8104 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8105 GCPtr += uSel & X86_SEL_MASK;
8106
8107 /*
8108 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8109 * ugly stuff to avoid this. This will make sure it's an atomic access
8110 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8111 */
8112 VBOXSTRICTRC rcStrict;
8113 uint8_t bUnmapInfo;
8114 uint32_t volatile *pu32;
8115 if ((GCPtr & 3) == 0)
8116 {
8117 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8118 GCPtr += 2 + 2;
8119 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8120 if (rcStrict != VINF_SUCCESS)
8121 return rcStrict;
8122        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8123 }
8124 else
8125 {
8126 /* The misaligned GDT/LDT case, map the whole thing. */
8127 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8128 if (rcStrict != VINF_SUCCESS)
8129 return rcStrict;
8130 switch ((uintptr_t)pu32 & 3)
8131 {
8132 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8133 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8134 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8135 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8136 }
8137 }
8138
8139 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8140}
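
/*
 * Bit position sketch (illustrative): the accessed bit lives at bit 40 of the
 * 8-byte descriptor.  After mapping the dword at offset 4 (GCPtr += 2 + 2
 * above), that same bit is bit 40 - 32 = 8 of the mapped dword, which is what
 * the aligned path sets:
 *
 *      unsigned const iBitInDesc  = 40;                  // accessed bit within the descriptor
 *      unsigned const iBitInDword = iBitInDesc - 4 * 8;  // -> 8, as in ASMAtomicBitSet(pu32, 8)
 */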
8141
8142
8143#undef LOG_GROUP
8144#define LOG_GROUP LOG_GROUP_IEM
8145
8146/** @} */
8147
8148/** @name Opcode Helpers.
8149 * @{
8150 */
8151
8152/**
8153 * Calculates the effective address of a ModR/M memory operand.
8154 *
8155 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8156 *
8157 * @return Strict VBox status code.
8158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8159 * @param bRm The ModRM byte.
8160 * @param cbImmAndRspOffset - First byte: The size of any immediate
8161 * following the effective address opcode bytes
8162 * (only for RIP relative addressing).
8163 * - Second byte: RSP displacement (for POP [ESP]).
8164 * @param pGCPtrEff Where to return the effective address.
8165 */
8166VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8167{
8168 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8169# define SET_SS_DEF() \
8170 do \
8171 { \
8172 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8173 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8174 } while (0)
8175
8176 if (!IEM_IS_64BIT_CODE(pVCpu))
8177 {
8178/** @todo Check the effective address size crap! */
8179 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8180 {
8181 uint16_t u16EffAddr;
8182
8183 /* Handle the disp16 form with no registers first. */
8184 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8185 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8186 else
8187 {
8188                /* Get the displacement. */
8189 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8190 {
8191 case 0: u16EffAddr = 0; break;
8192 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8193 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8194 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8195 }
8196
8197 /* Add the base and index registers to the disp. */
8198 switch (bRm & X86_MODRM_RM_MASK)
8199 {
8200 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8201 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8202 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8203 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8204 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8205 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8206 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8207 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8208 }
8209 }
8210
8211 *pGCPtrEff = u16EffAddr;
8212 }
8213 else
8214 {
8215 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8216 uint32_t u32EffAddr;
8217
8218 /* Handle the disp32 form with no registers first. */
8219 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8220 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8221 else
8222 {
8223 /* Get the register (or SIB) value. */
8224 switch ((bRm & X86_MODRM_RM_MASK))
8225 {
8226 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8227 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8228 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8229 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8230 case 4: /* SIB */
8231 {
8232 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8233
8234 /* Get the index and scale it. */
8235 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8236 {
8237 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8238 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8239 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8240 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8241 case 4: u32EffAddr = 0; /*none */ break;
8242 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8243 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8244 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8245 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8246 }
8247 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8248
8249 /* add base */
8250 switch (bSib & X86_SIB_BASE_MASK)
8251 {
8252 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8253 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8254 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8255 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8256 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8257 case 5:
8258 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8259 {
8260 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8261 SET_SS_DEF();
8262 }
8263 else
8264 {
8265 uint32_t u32Disp;
8266 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8267 u32EffAddr += u32Disp;
8268 }
8269 break;
8270 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8271 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8272 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8273 }
8274 break;
8275 }
8276 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8277 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8278 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8279 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8280 }
8281
8282 /* Get and add the displacement. */
8283 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8284 {
8285 case 0:
8286 break;
8287 case 1:
8288 {
8289 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8290 u32EffAddr += i8Disp;
8291 break;
8292 }
8293 case 2:
8294 {
8295 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8296 u32EffAddr += u32Disp;
8297 break;
8298 }
8299 default:
8300 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8301 }
8302
8303 }
8304 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8305 *pGCPtrEff = u32EffAddr;
8306 }
8307 }
8308 else
8309 {
8310 uint64_t u64EffAddr;
8311
8312 /* Handle the rip+disp32 form with no registers first. */
8313 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8314 {
8315 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8316 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8317 }
8318 else
8319 {
8320 /* Get the register (or SIB) value. */
8321 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8322 {
8323 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8324 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8325 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8326 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8327 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8328 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8329 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8330 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8331 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8332 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8333 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8334 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8335 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8336 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8337 /* SIB */
8338 case 4:
8339 case 12:
8340 {
8341 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8342
8343 /* Get the index and scale it. */
8344 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8345 {
8346 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8347 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8348 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8349 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8350 case 4: u64EffAddr = 0; /*none */ break;
8351 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8352 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8353 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8354 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8355 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8356 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8357 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8358 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8359 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8360 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8361 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8362 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8363 }
8364 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8365
8366 /* add base */
8367 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8368 {
8369 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8370 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8371 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8372 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8373 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8374 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8375 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8376 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8377 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8378 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8379 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8380 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8381 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8382 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8383 /* complicated encodings */
8384 case 5:
8385 case 13:
8386 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8387 {
8388 if (!pVCpu->iem.s.uRexB)
8389 {
8390 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8391 SET_SS_DEF();
8392 }
8393 else
8394 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8395 }
8396 else
8397 {
8398 uint32_t u32Disp;
8399 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8400 u64EffAddr += (int32_t)u32Disp;
8401 }
8402 break;
8403 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8404 }
8405 break;
8406 }
8407 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8408 }
8409
8410 /* Get and add the displacement. */
8411 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8412 {
8413 case 0:
8414 break;
8415 case 1:
8416 {
8417 int8_t i8Disp;
8418 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8419 u64EffAddr += i8Disp;
8420 break;
8421 }
8422 case 2:
8423 {
8424 uint32_t u32Disp;
8425 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8426 u64EffAddr += (int32_t)u32Disp;
8427 break;
8428 }
8429 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8430 }
8431
8432 }
8433
8434 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8435 *pGCPtrEff = u64EffAddr;
8436 else
8437 {
8438 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8439 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8440 }
8441 }
8442
8443 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8444 return VINF_SUCCESS;
8445}
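
/*
 * Worked example (illustrative, hypothetical register values) for the 16-bit
 * path above: bRm = 0x4a decodes as mod=1, rm=2, i.e. [bp+si+disp8] with SS
 * as the default segment:
 *
 *      uint16_t const uBp    = 0x1000, uSi = 0x0020;
 *      int8_t   const i8Disp = -4;
 *      uint16_t const uEff   = (uint16_t)(uBp + uSi + (int16_t)i8Disp);  // -> 0x101c
 */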
8446
8447
8448#ifdef IEM_WITH_SETJMP
8449/**
8450 * Calculates the effective address of a ModR/M memory operand.
8451 *
8452 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8453 *
8454 * May longjmp on internal error.
8455 *
8456 * @return The effective address.
8457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8458 * @param bRm The ModRM byte.
8459 * @param cbImmAndRspOffset - First byte: The size of any immediate
8460 * following the effective address opcode bytes
8461 * (only for RIP relative addressing).
8462 * - Second byte: RSP displacement (for POP [ESP]).
8463 */
8464RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8465{
8466 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8467# define SET_SS_DEF() \
8468 do \
8469 { \
8470 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8471 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8472 } while (0)
8473
8474 if (!IEM_IS_64BIT_CODE(pVCpu))
8475 {
8476/** @todo Check the effective address size crap! */
8477 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8478 {
8479 uint16_t u16EffAddr;
8480
8481 /* Handle the disp16 form with no registers first. */
8482 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8483 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8484 else
8485 {
8486                /* Get the displacement. */
8487 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8488 {
8489 case 0: u16EffAddr = 0; break;
8490 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8491 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8492 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8493 }
8494
8495 /* Add the base and index registers to the disp. */
8496 switch (bRm & X86_MODRM_RM_MASK)
8497 {
8498 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8499 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8500 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8501 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8502 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8503 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8504 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8505 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8506 }
8507 }
8508
8509 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8510 return u16EffAddr;
8511 }
8512
8513 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8514 uint32_t u32EffAddr;
8515
8516 /* Handle the disp32 form with no registers first. */
8517 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8518 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8519 else
8520 {
8521 /* Get the register (or SIB) value. */
8522 switch ((bRm & X86_MODRM_RM_MASK))
8523 {
8524 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8525 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8526 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8527 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8528 case 4: /* SIB */
8529 {
8530 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8531
8532 /* Get the index and scale it. */
8533 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8534 {
8535 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8536 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8537 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8538 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8539 case 4: u32EffAddr = 0; /*none */ break;
8540 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8541 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8542 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8543 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8544 }
8545 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8546
8547 /* add base */
8548 switch (bSib & X86_SIB_BASE_MASK)
8549 {
8550 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8551 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8552 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8553 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8554 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8555 case 5:
8556 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8557 {
8558 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8559 SET_SS_DEF();
8560 }
8561 else
8562 {
8563 uint32_t u32Disp;
8564 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8565 u32EffAddr += u32Disp;
8566 }
8567 break;
8568 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8569 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8570 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8571 }
8572 break;
8573 }
8574 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8575 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8576 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8577 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8578 }
8579
8580 /* Get and add the displacement. */
8581 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8582 {
8583 case 0:
8584 break;
8585 case 1:
8586 {
8587 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8588 u32EffAddr += i8Disp;
8589 break;
8590 }
8591 case 2:
8592 {
8593 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8594 u32EffAddr += u32Disp;
8595 break;
8596 }
8597 default:
8598 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8599 }
8600 }
8601
8602 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8603 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8604 return u32EffAddr;
8605 }
8606
8607 uint64_t u64EffAddr;
8608
8609 /* Handle the rip+disp32 form with no registers first. */
8610 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8611 {
8612 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8613 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8614 }
8615 else
8616 {
8617 /* Get the register (or SIB) value. */
8618 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8619 {
8620 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8621 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8622 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8623 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8624 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8625 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8626 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8627 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8628 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8629 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8630 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8631 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8632 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8633 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8634 /* SIB */
8635 case 4:
8636 case 12:
8637 {
8638 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8639
8640 /* Get the index and scale it. */
8641 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8642 {
8643 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8644 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8645 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8646 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8647 case 4: u64EffAddr = 0; /*none */ break;
8648 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8649 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8650 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8651 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8652 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8653 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8654 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8655 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8656 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8657 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8658 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8659 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8660 }
8661 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8662
8663 /* add base */
8664 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8665 {
8666 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8667 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8668 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8669 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8670 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8671 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8672 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8673 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8674 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8675 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8676 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8677 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8678 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8679 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8680 /* complicated encodings */
8681 case 5:
8682 case 13:
8683 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8684 {
8685 if (!pVCpu->iem.s.uRexB)
8686 {
8687 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8688 SET_SS_DEF();
8689 }
8690 else
8691 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8692 }
8693 else
8694 {
8695 uint32_t u32Disp;
8696 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8697 u64EffAddr += (int32_t)u32Disp;
8698 }
8699 break;
8700 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8701 }
8702 break;
8703 }
8704 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8705 }
8706
8707 /* Get and add the displacement. */
8708 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8709 {
8710 case 0:
8711 break;
8712 case 1:
8713 {
8714 int8_t i8Disp;
8715 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8716 u64EffAddr += i8Disp;
8717 break;
8718 }
8719 case 2:
8720 {
8721 uint32_t u32Disp;
8722 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8723 u64EffAddr += (int32_t)u32Disp;
8724 break;
8725 }
8726 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8727 }
8728
8729 }
8730
8731 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8732 {
8733 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8734 return u64EffAddr;
8735 }
8736 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8737 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8738 return u64EffAddr & UINT32_MAX;
8739}
8740#endif /* IEM_WITH_SETJMP */
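
/*
 * SIB sketch (illustrative, hypothetical register values) for the 64-bit path
 * above, assuming no REX.B/REX.X bits: bSib = 0xd8 decodes as scale=3,
 * index=3 (rbx), base=0 (rax), i.e. [rax + rbx*8]:
 *
 *      uint64_t const uRax = UINT64_C(0x1000), uRbx = 4;
 *      uint64_t       uEff = uRbx << 3;      // index scaled by 2^3
 *      uEff += uRax;                         // -> 0x1020
 */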
8741
8742
8743/**
8744 * Calculates the effective address of a ModR/M memory operand, extended version
8745 * for use in the recompilers.
8746 *
8747 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8748 *
8749 * @return Strict VBox status code.
8750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8751 * @param bRm The ModRM byte.
8752 * @param cbImmAndRspOffset - First byte: The size of any immediate
8753 * following the effective address opcode bytes
8754 * (only for RIP relative addressing).
8755 * - Second byte: RSP displacement (for POP [ESP]).
8756 * @param pGCPtrEff Where to return the effective address.
8757 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8758 * SIB byte (bits 39:32).
8759 */
8760VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8761{
8762 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8763# define SET_SS_DEF() \
8764 do \
8765 { \
8766 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8767 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8768 } while (0)
8769
8770 uint64_t uInfo;
8771 if (!IEM_IS_64BIT_CODE(pVCpu))
8772 {
8773/** @todo Check the effective address size crap! */
8774 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8775 {
8776 uint16_t u16EffAddr;
8777
8778 /* Handle the disp16 form with no registers first. */
8779 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8780 {
8781 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8782 uInfo = u16EffAddr;
8783 }
8784 else
8785 {
8786                /* Get the displacement. */
8787 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8788 {
8789 case 0: u16EffAddr = 0; break;
8790 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8791 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8792 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8793 }
8794 uInfo = u16EffAddr;
8795
8796 /* Add the base and index registers to the disp. */
8797 switch (bRm & X86_MODRM_RM_MASK)
8798 {
8799 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8800 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8801 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8802 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8803 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8804 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8805 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8806 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8807 }
8808 }
8809
8810 *pGCPtrEff = u16EffAddr;
8811 }
8812 else
8813 {
8814 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8815 uint32_t u32EffAddr;
8816
8817 /* Handle the disp32 form with no registers first. */
8818 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8819 {
8820 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8821 uInfo = u32EffAddr;
8822 }
8823 else
8824 {
8825 /* Get the register (or SIB) value. */
8826 uInfo = 0;
8827 switch ((bRm & X86_MODRM_RM_MASK))
8828 {
8829 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8830 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8831 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8832 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8833 case 4: /* SIB */
8834 {
8835 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8836 uInfo = (uint64_t)bSib << 32;
8837
8838 /* Get the index and scale it. */
8839 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8840 {
8841 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8842 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8843 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8844 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8845 case 4: u32EffAddr = 0; /*none */ break;
8846 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8847 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8848 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8849 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8850 }
8851 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8852
8853 /* add base */
8854 switch (bSib & X86_SIB_BASE_MASK)
8855 {
8856 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8857 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8858 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8859 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8860 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8861 case 5:
8862 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8863 {
8864 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8865 SET_SS_DEF();
8866 }
8867 else
8868 {
8869 uint32_t u32Disp;
8870 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8871 u32EffAddr += u32Disp;
8872 uInfo |= u32Disp;
8873 }
8874 break;
8875 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8876 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8877 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8878 }
8879 break;
8880 }
8881 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8882 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8883 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8884 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8885 }
8886
8887 /* Get and add the displacement. */
8888 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8889 {
8890 case 0:
8891 break;
8892 case 1:
8893 {
8894 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8895 u32EffAddr += i8Disp;
8896 uInfo |= (uint32_t)(int32_t)i8Disp;
8897 break;
8898 }
8899 case 2:
8900 {
8901 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8902 u32EffAddr += u32Disp;
8903 uInfo |= (uint32_t)u32Disp;
8904 break;
8905 }
8906 default:
8907 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8908 }
8909
8910 }
8911 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8912 *pGCPtrEff = u32EffAddr;
8913 }
8914 }
8915 else
8916 {
8917 uint64_t u64EffAddr;
8918
8919 /* Handle the rip+disp32 form with no registers first. */
8920 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8921 {
8922 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8923 uInfo = (uint32_t)u64EffAddr;
8924 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8925 }
8926 else
8927 {
8928 /* Get the register (or SIB) value. */
8929 uInfo = 0;
8930 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8931 {
8932 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8933 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8934 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8935 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8936 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8937 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8938 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8939 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8940 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8941 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8942 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8943 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8944 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8945 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8946 /* SIB */
8947 case 4:
8948 case 12:
8949 {
8950 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8951 uInfo = (uint64_t)bSib << 32;
8952
8953 /* Get the index and scale it. */
8954 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8955 {
8956 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8957 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8958 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8959 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8960 case 4: u64EffAddr = 0; /*none */ break;
8961 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8962 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8963 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8964 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8965 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8966 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8967 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8968 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8969 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8970 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8971 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8972 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8973 }
8974 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8975
8976 /* add base */
8977 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8978 {
8979 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8980 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8981 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8982 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8983 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8984 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8985 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8986 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8987 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8988 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8989 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8990 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8991 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8992 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8993 /* complicated encodings */
8994 case 5:
8995 case 13:
8996 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8997 {
8998 if (!pVCpu->iem.s.uRexB)
8999 {
9000 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9001 SET_SS_DEF();
9002 }
9003 else
9004 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9005 }
9006 else
9007 {
9008 uint32_t u32Disp;
9009 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9010 u64EffAddr += (int32_t)u32Disp;
9011 uInfo |= u32Disp;
9012 }
9013 break;
9014 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9015 }
9016 break;
9017 }
9018 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9019 }
9020
9021 /* Get and add the displacement. */
9022 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9023 {
9024 case 0:
9025 break;
9026 case 1:
9027 {
9028 int8_t i8Disp;
9029 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9030 u64EffAddr += i8Disp;
9031 uInfo |= (uint32_t)(int32_t)i8Disp;
9032 break;
9033 }
9034 case 2:
9035 {
9036 uint32_t u32Disp;
9037 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9038 u64EffAddr += (int32_t)u32Disp;
9039 uInfo |= u32Disp;
9040 break;
9041 }
9042 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9043 }
9044
9045 }
9046
9047 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9048 *pGCPtrEff = u64EffAddr;
9049 else
9050 {
9051 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9052 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9053 }
9054 }
9055 *puInfo = uInfo;
9056
9057 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9058 return VINF_SUCCESS;
9059}
9060
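/*
 * Illustrative usage sketch (compiled out): how a caller might pack the
 * cbImmAndRspOffset argument and unpack the *puInfo value documented above.
 * It assumes pVCpu and bRm are in scope as in the function above; the 4-byte
 * immediate size and zero RSP offset are made-up values for the example.
 */
#if 0
{
    RTGCPTR        GCPtrEff = 0;
    uint64_t       uInfo    = 0;
    uint32_t const cbImmAndRspOffset = (0 /*offRsp*/ << 8) | 4 /*cbImm*/;
    VBOXSTRICTRC const rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
    if (rcStrict == VINF_SUCCESS)
    {
        uint32_t const u32Disp = (uint32_t)uInfo;        /* displacement, bits 31:0 */
        uint8_t  const bSib    = (uint8_t)(uInfo >> 32); /* SIB byte, bits 39:32 */
        Log5(("example: GCPtrEff=%RGv disp=%#x sib=%#x\n", GCPtrEff, u32Disp, bSib));
    }
}
#endif
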
9061/** @} */
9062
9063
9064#ifdef LOG_ENABLED
9065/**
9066 * Logs the current instruction.
9067 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9068 * @param fSameCtx Set if we have the same context information as the VMM,
9069 * clear if we may have already executed an instruction in
9070 * our debug context. When clear, we assume IEMCPU holds
9071 * valid CPU mode info.
9072 *
9073 * The @a fSameCtx parameter is now misleading and obsolete.
9074 * @param pszFunction The IEM function doing the execution.
9075 */
9076static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9077{
9078# ifdef IN_RING3
9079 if (LogIs2Enabled())
9080 {
9081 char szInstr[256];
9082 uint32_t cbInstr = 0;
9083 if (fSameCtx)
9084 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9085 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9086 szInstr, sizeof(szInstr), &cbInstr);
9087 else
9088 {
9089 uint32_t fFlags = 0;
9090 switch (IEM_GET_CPU_MODE(pVCpu))
9091 {
9092 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9093 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9094 case IEMMODE_16BIT:
9095 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9096 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9097 else
9098 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9099 break;
9100 }
9101 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9102 szInstr, sizeof(szInstr), &cbInstr);
9103 }
9104
9105 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9106 Log2(("**** %s fExec=%x\n"
9107 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9108 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9109 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9110 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9111 " %s\n"
9112 , pszFunction, pVCpu->iem.s.fExec,
9113 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9114 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9115 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9116 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9117 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9118 szInstr));
9119
9120 /* This stuff sucks atm. as it fills the log with MSRs. */
9121 //if (LogIs3Enabled())
9122 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9123 }
9124 else
9125# endif
9126 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9127 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9128 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9129}
9130#endif /* LOG_ENABLED */
9131
9132
9133#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9134/**
9135 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9136 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9137 *
9138 * @returns Modified rcStrict.
9139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9140 * @param rcStrict The instruction execution status.
9141 */
9142static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9143{
9144 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9145 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9146 {
9147 /* VMX preemption timer takes priority over NMI-window exits. */
9148 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9149 {
9150 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9151 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9152 }
9153 /*
9154 * Check remaining intercepts.
9155 *
9156 * NMI-window and Interrupt-window VM-exits.
9157 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9158 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9159 *
9160 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9161 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9162 */
9163 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9164 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9165 && !TRPMHasTrap(pVCpu))
9166 {
9167 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9168 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9169 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9170 {
9171 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9172 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9173 }
9174 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9175 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9176 {
9177 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9178 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9179 }
9180 }
9181 }
9182 /* TPR-below threshold/APIC write has the highest priority. */
9183 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9184 {
9185 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9186 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9187 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9188 }
9189 /* MTF takes priority over VMX-preemption timer. */
9190 else
9191 {
9192 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9193 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9194 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9195 }
9196 return rcStrict;
9197}
9198#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9199
9200
9201/**
9202 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9203 * IEMExecOneWithPrefetchedByPC.
9204 *
9205 * Similar code is found in IEMExecLots.
9206 *
9207 * @return Strict VBox status code.
9208 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9209 * @param fExecuteInhibit If set, execute the instruction following CLI,
9210 * POP SS and MOV SS,GR.
9211 * @param pszFunction The calling function name.
9212 */
9213DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9214{
9215 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9216 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9217 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9218 RT_NOREF_PV(pszFunction);
9219
9220#ifdef IEM_WITH_SETJMP
9221 VBOXSTRICTRC rcStrict;
9222 IEM_TRY_SETJMP(pVCpu, rcStrict)
9223 {
9224 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9225 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9226 }
9227 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9228 {
9229 pVCpu->iem.s.cLongJumps++;
9230 }
9231 IEM_CATCH_LONGJMP_END(pVCpu);
9232#else
9233 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9234 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9235#endif
9236 if (rcStrict == VINF_SUCCESS)
9237 pVCpu->iem.s.cInstructions++;
9238 if (pVCpu->iem.s.cActiveMappings > 0)
9239 {
9240 Assert(rcStrict != VINF_SUCCESS);
9241 iemMemRollback(pVCpu);
9242 }
9243 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9244 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9245 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9246
9247//#ifdef DEBUG
9248// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9249//#endif
9250
9251#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9252 /*
9253 * Perform any VMX nested-guest instruction boundary actions.
9254 *
9255 * If any of these causes a VM-exit, we must skip executing the next
9256 * instruction (would run into stale page tables). A VM-exit makes sure
9257 * there is no interrupt-inhibition, so that should ensure we don't go on
9258 * to execute the next instruction. Clearing fExecuteInhibit is
9259 * problematic because of the setjmp/longjmp clobbering above.
9260 */
9261 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9262 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9263 || rcStrict != VINF_SUCCESS)
9264 { /* likely */ }
9265 else
9266 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9267#endif
9268
9269 /* Execute the next instruction as well if a cli, pop ss or
9270 mov ss, Gr has just completed successfully. */
9271 if ( fExecuteInhibit
9272 && rcStrict == VINF_SUCCESS
9273 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9274 {
9275 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9276 if (rcStrict == VINF_SUCCESS)
9277 {
9278#ifdef LOG_ENABLED
9279 iemLogCurInstr(pVCpu, false, pszFunction);
9280#endif
9281#ifdef IEM_WITH_SETJMP
9282 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9283 {
9284 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9285 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9286 }
9287 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9288 {
9289 pVCpu->iem.s.cLongJumps++;
9290 }
9291 IEM_CATCH_LONGJMP_END(pVCpu);
9292#else
9293 IEM_OPCODE_GET_FIRST_U8(&b);
9294 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9295#endif
9296 if (rcStrict == VINF_SUCCESS)
9297 {
9298 pVCpu->iem.s.cInstructions++;
9299#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9300 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9301 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9302 { /* likely */ }
9303 else
9304 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9305#endif
9306 }
9307 if (pVCpu->iem.s.cActiveMappings > 0)
9308 {
9309 Assert(rcStrict != VINF_SUCCESS);
9310 iemMemRollback(pVCpu);
9311 }
9312 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9313 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9314 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9315 }
9316 else if (pVCpu->iem.s.cActiveMappings > 0)
9317 iemMemRollback(pVCpu);
9318 /** @todo drop this after we bake this change into RIP advancing. */
9319 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9320 }
9321
9322 /*
9323 * Return value fiddling, statistics and sanity assertions.
9324 */
9325 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9326
9327 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9328 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9329 return rcStrict;
9330}
9331
9332
9333/**
9334 * Execute one instruction.
9335 *
9336 * @return Strict VBox status code.
9337 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9338 */
9339VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9340{
9341 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9342#ifdef LOG_ENABLED
9343 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9344#endif
9345
9346 /*
9347 * Do the decoding and emulation.
9348 */
9349 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9350 if (rcStrict == VINF_SUCCESS)
9351 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9352 else if (pVCpu->iem.s.cActiveMappings > 0)
9353 iemMemRollback(pVCpu);
9354
9355 if (rcStrict != VINF_SUCCESS)
9356 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9357 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9358 return rcStrict;
9359}
9360
9361
9362VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9363{
9364 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9365 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9366 if (rcStrict == VINF_SUCCESS)
9367 {
9368 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9369 if (pcbWritten)
9370 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9371 }
9372 else if (pVCpu->iem.s.cActiveMappings > 0)
9373 iemMemRollback(pVCpu);
9374
9375 return rcStrict;
9376}
9377
9378
9379VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9380 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9381{
9382 VBOXSTRICTRC rcStrict;
9383 if ( cbOpcodeBytes
9384 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9385 {
9386 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9387#ifdef IEM_WITH_CODE_TLB
9388 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9389 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9390 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9391 pVCpu->iem.s.offCurInstrStart = 0;
9392 pVCpu->iem.s.offInstrNextByte = 0;
9393 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9394#else
9395 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9396 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9397#endif
9398 rcStrict = VINF_SUCCESS;
9399 }
9400 else
9401 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9402 if (rcStrict == VINF_SUCCESS)
9403 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9404 else if (pVCpu->iem.s.cActiveMappings > 0)
9405 iemMemRollback(pVCpu);
9406
9407 return rcStrict;
9408}
9409
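/*
 * Illustrative sketch (compiled out): feeding IEM an instruction whose opcode
 * bytes the caller has already fetched.  The bytes below (CPUID) are a
 * made-up example; they are only used when the guest RIP still equals
 * OpcodeBytesPC, otherwise the opcodes are prefetched the normal way.
 */
#if 0
{
    static uint8_t const s_abOpcode[] = { 0x0f, 0xa2 }; /* cpuid */
    VBOXSTRICTRC const rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
                                                               s_abOpcode, sizeof(s_abOpcode));
}
#endif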
9410
9411VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9412{
9413 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9414 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9415 if (rcStrict == VINF_SUCCESS)
9416 {
9417 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9418 if (pcbWritten)
9419 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9420 }
9421 else if (pVCpu->iem.s.cActiveMappings > 0)
9422 iemMemRollback(pVCpu);
9423
9424 return rcStrict;
9425}
9426
9427
9428VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9429 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9430{
9431 VBOXSTRICTRC rcStrict;
9432 if ( cbOpcodeBytes
9433 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9434 {
9435 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9436#ifdef IEM_WITH_CODE_TLB
9437 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9438 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9439 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9440 pVCpu->iem.s.offCurInstrStart = 0;
9441 pVCpu->iem.s.offInstrNextByte = 0;
9442 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9443#else
9444 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9445 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9446#endif
9447 rcStrict = VINF_SUCCESS;
9448 }
9449 else
9450 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9451 if (rcStrict == VINF_SUCCESS)
9452 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9453 else if (pVCpu->iem.s.cActiveMappings > 0)
9454 iemMemRollback(pVCpu);
9455
9456 return rcStrict;
9457}
9458
9459
9460/**
9461 * For handling split cacheline lock operations when the host has split-lock
9462 * detection enabled.
9463 *
9464 * This will cause the interpreter to disregard the lock prefix and implicit
9465 * locking (xchg).
9466 *
9467 * @returns Strict VBox status code.
9468 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9469 */
9470VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9471{
9472 /*
9473 * Do the decoding and emulation.
9474 */
9475 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9476 if (rcStrict == VINF_SUCCESS)
9477 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9478 else if (pVCpu->iem.s.cActiveMappings > 0)
9479 iemMemRollback(pVCpu);
9480
9481 if (rcStrict != VINF_SUCCESS)
9482 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9483 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9484 return rcStrict;
9485}
9486
9487
9488/**
9489 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9490 * inject a pending TRPM trap.
9491 */
9492VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9493{
9494 Assert(TRPMHasTrap(pVCpu));
9495
9496 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9497 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9498 {
9499 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9500#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9501 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9502 if (fIntrEnabled)
9503 {
9504 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9505 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9506 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9507 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9508 else
9509 {
9510 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9511 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9512 }
9513 }
9514#else
9515 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9516#endif
9517 if (fIntrEnabled)
9518 {
9519 uint8_t u8TrapNo;
9520 TRPMEVENT enmType;
9521 uint32_t uErrCode;
9522 RTGCPTR uCr2;
9523 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9524 AssertRC(rc2);
9525 Assert(enmType == TRPM_HARDWARE_INT);
9526 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9527
9528 TRPMResetTrap(pVCpu);
9529
9530#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9531 /* Injecting an event may cause a VM-exit. */
9532 if ( rcStrict != VINF_SUCCESS
9533 && rcStrict != VINF_IEM_RAISED_XCPT)
9534 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9535#else
9536 NOREF(rcStrict);
9537#endif
9538 }
9539 }
9540
9541 return VINF_SUCCESS;
9542}
9543
9544
9545VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9546{
9547 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9548 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9549 Assert(cMaxInstructions > 0);
9550
9551 /*
9552 * See if there is an interrupt pending in TRPM, inject it if we can.
9553 */
9554 /** @todo What if we are injecting an exception and not an interrupt? Is that
9555 * possible here? For now we assert it is indeed only an interrupt. */
9556 if (!TRPMHasTrap(pVCpu))
9557 { /* likely */ }
9558 else
9559 {
9560 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9561 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9562 { /*likely */ }
9563 else
9564 return rcStrict;
9565 }
9566
9567 /*
9568 * Initial decoder init w/ prefetch, then setup setjmp.
9569 */
9570 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9571 if (rcStrict == VINF_SUCCESS)
9572 {
9573#ifdef IEM_WITH_SETJMP
9574 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9575 IEM_TRY_SETJMP(pVCpu, rcStrict)
9576#endif
9577 {
9578 /*
9579 * The run loop, bounded by the caller's cMaxInstructions budget.
9580 */
9581 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9582 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9583 for (;;)
9584 {
9585 /*
9586 * Log the state.
9587 */
9588#ifdef LOG_ENABLED
9589 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9590#endif
9591
9592 /*
9593 * Do the decoding and emulation.
9594 */
9595 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9596 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9597#ifdef VBOX_STRICT
9598 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9599#endif
9600 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9601 {
9602 Assert(pVCpu->iem.s.cActiveMappings == 0);
9603 pVCpu->iem.s.cInstructions++;
9604
9605#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9606 /* Perform any VMX nested-guest instruction boundary actions. */
9607 uint64_t fCpu = pVCpu->fLocalForcedActions;
9608 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9609 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9610 { /* likely */ }
9611 else
9612 {
9613 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9614 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9615 fCpu = pVCpu->fLocalForcedActions;
9616 else
9617 {
9618 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9619 break;
9620 }
9621 }
9622#endif
9623 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9624 {
9625#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9626 uint64_t fCpu = pVCpu->fLocalForcedActions;
9627#endif
9628 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9629 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9630 | VMCPU_FF_TLB_FLUSH
9631 | VMCPU_FF_UNHALT );
9632
9633 if (RT_LIKELY( ( !fCpu
9634 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9635 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9636 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9637 {
9638 if (--cMaxInstructionsGccStupidity > 0)
9639 {
9640 /* Poll timers every now and then according to the caller's specs. */
9641 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9642 || !TMTimerPollBool(pVM, pVCpu))
9643 {
9644 Assert(pVCpu->iem.s.cActiveMappings == 0);
9645 iemReInitDecoder(pVCpu);
9646 continue;
9647 }
9648 }
9649 }
9650 }
9651 Assert(pVCpu->iem.s.cActiveMappings == 0);
9652 }
9653 else if (pVCpu->iem.s.cActiveMappings > 0)
9654 iemMemRollback(pVCpu);
9655 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9656 break;
9657 }
9658 }
9659#ifdef IEM_WITH_SETJMP
9660 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9661 {
9662 if (pVCpu->iem.s.cActiveMappings > 0)
9663 iemMemRollback(pVCpu);
9664# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9665 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9666# endif
9667 pVCpu->iem.s.cLongJumps++;
9668 }
9669 IEM_CATCH_LONGJMP_END(pVCpu);
9670#endif
9671
9672 /*
9673 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9674 */
9675 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9676 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9677 }
9678 else
9679 {
9680 if (pVCpu->iem.s.cActiveMappings > 0)
9681 iemMemRollback(pVCpu);
9682
9683#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9684 /*
9685 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9686 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9687 */
9688 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9689#endif
9690 }
9691
9692 /*
9693 * Maybe re-enter raw-mode and log.
9694 */
9695 if (rcStrict != VINF_SUCCESS)
9696 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9697 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9698 if (pcInstructions)
9699 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9700 return rcStrict;
9701}
9702
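/*
 * Usage sketch (compiled out): cPollRate is applied as a bit mask, so it must
 * be a power of two minus one (see the assertion at the top of IEMExecLots);
 * with 511 the timers are polled roughly every 512 instructions.  The
 * instruction budget of 4096 is a made-up value for the example.
 */
#if 0
{
    uint32_t cInstructionsExecuted = 0;
    VBOXSTRICTRC const rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructionsExecuted);
}
#endif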
9703
9704/**
9705 * Interface used by EMExecuteExec, does exit statistics and limits.
9706 *
9707 * @returns Strict VBox status code.
9708 * @param pVCpu The cross context virtual CPU structure.
9709 * @param fWillExit To be defined.
9710 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9711 * @param cMaxInstructions Maximum number of instructions to execute.
9712 * @param cMaxInstructionsWithoutExits
9713 * The max number of instructions without exits.
9714 * @param pStats Where to return statistics.
9715 */
9716VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9717 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9718{
9719 NOREF(fWillExit); /** @todo define flexible exit crits */
9720
9721 /*
9722 * Initialize return stats.
9723 */
9724 pStats->cInstructions = 0;
9725 pStats->cExits = 0;
9726 pStats->cMaxExitDistance = 0;
9727 pStats->cReserved = 0;
9728
9729 /*
9730 * Initial decoder init w/ prefetch, then setup setjmp.
9731 */
9732 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9733 if (rcStrict == VINF_SUCCESS)
9734 {
9735#ifdef IEM_WITH_SETJMP
9736 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9737 IEM_TRY_SETJMP(pVCpu, rcStrict)
9738#endif
9739 {
9740#ifdef IN_RING0
9741 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9742#endif
9743 uint32_t cInstructionSinceLastExit = 0;
9744
9745 /*
9746 * The run loop, bounded by the caller's instruction limits.
9747 */
9748 PVM pVM = pVCpu->CTX_SUFF(pVM);
9749 for (;;)
9750 {
9751 /*
9752 * Log the state.
9753 */
9754#ifdef LOG_ENABLED
9755 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9756#endif
9757
9758 /*
9759 * Do the decoding and emulation.
9760 */
9761 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9762
9763 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9764 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9765
9766 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9767 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9768 {
9769 pStats->cExits += 1;
9770 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9771 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9772 cInstructionSinceLastExit = 0;
9773 }
9774
9775 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9776 {
9777 Assert(pVCpu->iem.s.cActiveMappings == 0);
9778 pVCpu->iem.s.cInstructions++;
9779 pStats->cInstructions++;
9780 cInstructionSinceLastExit++;
9781
9782#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9783 /* Perform any VMX nested-guest instruction boundary actions. */
9784 uint64_t fCpu = pVCpu->fLocalForcedActions;
9785 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9786 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9787 { /* likely */ }
9788 else
9789 {
9790 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9791 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9792 fCpu = pVCpu->fLocalForcedActions;
9793 else
9794 {
9795 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9796 break;
9797 }
9798 }
9799#endif
9800 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9801 {
9802#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9803 uint64_t fCpu = pVCpu->fLocalForcedActions;
9804#endif
9805 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9806 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9807 | VMCPU_FF_TLB_FLUSH
9808 | VMCPU_FF_UNHALT );
9809 if (RT_LIKELY( ( ( !fCpu
9810 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9811 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9812 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9813 || pStats->cInstructions < cMinInstructions))
9814 {
9815 if (pStats->cInstructions < cMaxInstructions)
9816 {
9817 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9818 {
9819#ifdef IN_RING0
9820 if ( !fCheckPreemptionPending
9821 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9822#endif
9823 {
9824 Assert(pVCpu->iem.s.cActiveMappings == 0);
9825 iemReInitDecoder(pVCpu);
9826 continue;
9827 }
9828#ifdef IN_RING0
9829 rcStrict = VINF_EM_RAW_INTERRUPT;
9830 break;
9831#endif
9832 }
9833 }
9834 }
9835 Assert(!(fCpu & VMCPU_FF_IEM));
9836 }
9837 Assert(pVCpu->iem.s.cActiveMappings == 0);
9838 }
9839 else if (pVCpu->iem.s.cActiveMappings > 0)
9840 iemMemRollback(pVCpu);
9841 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9842 break;
9843 }
9844 }
9845#ifdef IEM_WITH_SETJMP
9846 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9847 {
9848 if (pVCpu->iem.s.cActiveMappings > 0)
9849 iemMemRollback(pVCpu);
9850 pVCpu->iem.s.cLongJumps++;
9851 }
9852 IEM_CATCH_LONGJMP_END(pVCpu);
9853#endif
9854
9855 /*
9856 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9857 */
9858 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9859 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9860 }
9861 else
9862 {
9863 if (pVCpu->iem.s.cActiveMappings > 0)
9864 iemMemRollback(pVCpu);
9865
9866#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9867 /*
9868 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9869 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9870 */
9871 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9872#endif
9873 }
9874
9875 /*
9876 * Maybe re-enter raw-mode and log.
9877 */
9878 if (rcStrict != VINF_SUCCESS)
9879 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9880 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9881 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9882 return rcStrict;
9883}
9884
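/*
 * Usage sketch (compiled out): the caller bounds the run with a minimum and a
 * maximum instruction count plus a cap on exit-free stretches, and receives
 * the exit statistics back.  All the concrete limits are made-up values for
 * the example.
 */
#if 0
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC const rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 32 /*cMinInstructions*/,
                                                  4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/,
                                                  &Stats);
}
#endif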
9885
9886/**
9887 * Injects a trap, fault, abort, software interrupt or external interrupt.
9888 *
9889 * The parameter list matches TRPMQueryTrapAll pretty closely.
9890 *
9891 * @returns Strict VBox status code.
9892 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9893 * @param u8TrapNo The trap number.
9894 * @param enmType What type is it (trap/fault/abort), software
9895 * interrupt or hardware interrupt.
9896 * @param uErrCode The error code if applicable.
9897 * @param uCr2 The CR2 value if applicable.
9898 * @param cbInstr The instruction length (only relevant for
9899 * software interrupts).
9900 */
9901VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9902 uint8_t cbInstr)
9903{
9904 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9905#ifdef DBGFTRACE_ENABLED
9906 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9907 u8TrapNo, enmType, uErrCode, uCr2);
9908#endif
9909
9910 uint32_t fFlags;
9911 switch (enmType)
9912 {
9913 case TRPM_HARDWARE_INT:
9914 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9915 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9916 uErrCode = uCr2 = 0;
9917 break;
9918
9919 case TRPM_SOFTWARE_INT:
9920 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9921 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9922 uErrCode = uCr2 = 0;
9923 break;
9924
9925 case TRPM_TRAP:
9926 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
9927 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9928 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9929 if (u8TrapNo == X86_XCPT_PF)
9930 fFlags |= IEM_XCPT_FLAGS_CR2;
9931 switch (u8TrapNo)
9932 {
9933 case X86_XCPT_DF:
9934 case X86_XCPT_TS:
9935 case X86_XCPT_NP:
9936 case X86_XCPT_SS:
9937 case X86_XCPT_PF:
9938 case X86_XCPT_AC:
9939 case X86_XCPT_GP:
9940 fFlags |= IEM_XCPT_FLAGS_ERR;
9941 break;
9942 }
9943 break;
9944
9945 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9946 }
9947
9948 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9949
9950 if (pVCpu->iem.s.cActiveMappings > 0)
9951 iemMemRollback(pVCpu);
9952
9953 return rcStrict;
9954}
9955
9956
9957/**
9958 * Injects the active TRPM event.
9959 *
9960 * @returns Strict VBox status code.
9961 * @param pVCpu The cross context virtual CPU structure.
9962 */
9963VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9964{
9965#ifndef IEM_IMPLEMENTS_TASKSWITCH
9966 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9967#else
9968 uint8_t u8TrapNo;
9969 TRPMEVENT enmType;
9970 uint32_t uErrCode;
9971 RTGCUINTPTR uCr2;
9972 uint8_t cbInstr;
9973 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9974 if (RT_FAILURE(rc))
9975 return rc;
9976
9977 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9978 * ICEBP \#DB injection as a special case. */
9979 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9980#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9981 if (rcStrict == VINF_SVM_VMEXIT)
9982 rcStrict = VINF_SUCCESS;
9983#endif
9984#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9985 if (rcStrict == VINF_VMX_VMEXIT)
9986 rcStrict = VINF_SUCCESS;
9987#endif
9988 /** @todo Are there any other codes that imply the event was successfully
9989 * delivered to the guest? See @bugref{6607}. */
9990 if ( rcStrict == VINF_SUCCESS
9991 || rcStrict == VINF_IEM_RAISED_XCPT)
9992 TRPMResetTrap(pVCpu);
9993
9994 return rcStrict;
9995#endif
9996}
9997
9998
9999VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10000{
10001 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10002 return VERR_NOT_IMPLEMENTED;
10003}
10004
10005
10006VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10007{
10008 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10009 return VERR_NOT_IMPLEMENTED;
10010}
10011
10012
10013/**
10014 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10015 *
10016 * This API ASSUMES that the caller has already verified that the guest code is
10017 * allowed to access the I/O port. (The I/O port is in the DX register in the
10018 * guest state.)
10019 *
10020 * @returns Strict VBox status code.
10021 * @param pVCpu The cross context virtual CPU structure.
10022 * @param cbValue The size of the I/O port access (1, 2, or 4).
10023 * @param enmAddrMode The addressing mode.
10024 * @param fRepPrefix Indicates whether a repeat prefix is used
10025 * (doesn't matter which for this instruction).
10026 * @param cbInstr The instruction length in bytes.
10027 * @param iEffSeg The effective segment register (X86_SREG_XXX index).
10028 * @param fIoChecked Whether the access to the I/O port has been
10029 * checked or not. It's typically checked in the
10030 * HM scenario.
10031 */
10032VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10033 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10034{
10035 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10036 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10037
10038 /*
10039 * State init.
10040 */
10041 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10042
10043 /*
10044 * Switch orgy for getting to the right handler.
10045 */
10046 VBOXSTRICTRC rcStrict;
10047 if (fRepPrefix)
10048 {
10049 switch (enmAddrMode)
10050 {
10051 case IEMMODE_16BIT:
10052 switch (cbValue)
10053 {
10054 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10055 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10056 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10057 default:
10058 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10059 }
10060 break;
10061
10062 case IEMMODE_32BIT:
10063 switch (cbValue)
10064 {
10065 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10066 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10067 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10068 default:
10069 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10070 }
10071 break;
10072
10073 case IEMMODE_64BIT:
10074 switch (cbValue)
10075 {
10076 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10077 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10078 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10079 default:
10080 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10081 }
10082 break;
10083
10084 default:
10085 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10086 }
10087 }
10088 else
10089 {
10090 switch (enmAddrMode)
10091 {
10092 case IEMMODE_16BIT:
10093 switch (cbValue)
10094 {
10095 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10096 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10097 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10098 default:
10099 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10100 }
10101 break;
10102
10103 case IEMMODE_32BIT:
10104 switch (cbValue)
10105 {
10106 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10107 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10108 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10109 default:
10110 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10111 }
10112 break;
10113
10114 case IEMMODE_64BIT:
10115 switch (cbValue)
10116 {
10117 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10118 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10119 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10120 default:
10121 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10122 }
10123 break;
10124
10125 default:
10126 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10127 }
10128 }
10129
10130 if (pVCpu->iem.s.cActiveMappings)
10131 iemMemRollback(pVCpu);
10132
10133 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10134}
10135
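/*
 * Illustrative sketch (compiled out): how an HM exit handler might forward a
 * "rep outsb" intercept.  The 2-byte instruction length, the DS segment and
 * the already-checked I/O permission are assumptions for the example; the
 * port itself is taken from guest DX by the API.
 */
#if 0
{
    VBOXSTRICTRC const rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                                       2 /*cbInstr*/, X86_SREG_DS, true /*fIoChecked*/);
}
#endif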
10136
10137/**
10138 * Interface for HM and EM for executing string I/O IN (read) instructions.
10139 *
10140 * This API ASSUMES that the caller has already verified that the guest code is
10141 * allowed to access the I/O port. (The I/O port is in the DX register in the
10142 * guest state.)
10143 *
10144 * @returns Strict VBox status code.
10145 * @param pVCpu The cross context virtual CPU structure.
10146 * @param cbValue The size of the I/O port access (1, 2, or 4).
10147 * @param enmAddrMode The addressing mode.
10148 * @param fRepPrefix Indicates whether a repeat prefix is used
10149 * (doesn't matter which for this instruction).
10150 * @param cbInstr The instruction length in bytes.
10151 * @param fIoChecked Whether the access to the I/O port has been
10152 * checked or not. It's typically checked in the
10153 * HM scenario.
10154 */
10155VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10156 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10157{
10158 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10159
10160 /*
10161 * State init.
10162 */
10163 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10164
10165 /*
10166 * Switch orgy for getting to the right handler.
10167 */
10168 VBOXSTRICTRC rcStrict;
10169 if (fRepPrefix)
10170 {
10171 switch (enmAddrMode)
10172 {
10173 case IEMMODE_16BIT:
10174 switch (cbValue)
10175 {
10176 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10177 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10178 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10179 default:
10180 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10181 }
10182 break;
10183
10184 case IEMMODE_32BIT:
10185 switch (cbValue)
10186 {
10187 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10188 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10189 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10190 default:
10191 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10192 }
10193 break;
10194
10195 case IEMMODE_64BIT:
10196 switch (cbValue)
10197 {
10198 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10199 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10200 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10201 default:
10202 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10203 }
10204 break;
10205
10206 default:
10207 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10208 }
10209 }
10210 else
10211 {
10212 switch (enmAddrMode)
10213 {
10214 case IEMMODE_16BIT:
10215 switch (cbValue)
10216 {
10217 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10218 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10219 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10220 default:
10221 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10222 }
10223 break;
10224
10225 case IEMMODE_32BIT:
10226 switch (cbValue)
10227 {
10228 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10229 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10230 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10231 default:
10232 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10233 }
10234 break;
10235
10236 case IEMMODE_64BIT:
10237 switch (cbValue)
10238 {
10239 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10240 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10241 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10242 default:
10243 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10244 }
10245 break;
10246
10247 default:
10248 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10249 }
10250 }
10251
10252 if ( pVCpu->iem.s.cActiveMappings == 0
10253 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10254 { /* likely */ }
10255 else
10256 {
10257 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10258 iemMemRollback(pVCpu);
10259 }
10260 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10261}
10262
10263
10264/**
10265 * Interface for rawmode to execute an OUT (write) instruction.
10266 *
10267 * @returns Strict VBox status code.
10268 * @param pVCpu The cross context virtual CPU structure.
10269 * @param cbInstr The instruction length in bytes.
10270 * @param u16Port The port to write to.
10271 * @param fImm Whether the port is specified using an immediate operand or
10272 * using the implicit DX register.
10273 * @param cbReg The register size.
10274 *
10275 * @remarks In ring-0 not all of the state needs to be synced in.
10276 */
10277VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10278{
10279 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10280 Assert(cbReg <= 4 && cbReg != 3);
10281
10282 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10283 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10284 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10285 Assert(!pVCpu->iem.s.cActiveMappings);
10286 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10287}
10288
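/*
 * Sketch (compiled out): forwarding a decoded single-byte "out dx, al".
 * The port number and the 1-byte instruction length are made-up values for
 * the example; a real caller would take them from its exit information.
 */
#if 0
{
    VBOXSTRICTRC const rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, 0x80 /*u16Port*/, false /*fImm*/, 1 /*cbReg*/);
}
#endif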
10289
10290/**
10291 * Interface for rawmode to execute an IN (read) instruction.
10292 *
10293 * @returns Strict VBox status code.
10294 * @param pVCpu The cross context virtual CPU structure.
10295 * @param cbInstr The instruction length in bytes.
10296 * @param u16Port The port to read.
10297 * @param fImm Whether the port is specified using an immediate operand or
10298 * using the implicit DX.
10299 * @param cbReg The register size.
10300 */
10301VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10302{
10303 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10304 Assert(cbReg <= 4 && cbReg != 3);
10305
10306 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10307 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10308 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10309 Assert(!pVCpu->iem.s.cActiveMappings);
10310 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10311}
10312
10313
10314/**
10315 * Interface for HM and EM to write to a CRx register.
10316 *
10317 * @returns Strict VBox status code.
10318 * @param pVCpu The cross context virtual CPU structure.
10319 * @param cbInstr The instruction length in bytes.
10320 * @param iCrReg The control register number (destination).
10321 * @param iGReg The general purpose register number (source).
10322 *
10323 * @remarks In ring-0 not all of the state needs to be synced in.
10324 */
10325VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10326{
10327 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10328 Assert(iCrReg < 16);
10329 Assert(iGReg < 16);
10330
10331 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10332 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10333 Assert(!pVCpu->iem.s.cActiveMappings);
10334 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10335}
10336
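/*
 * Sketch (compiled out): forwarding a decoded "mov cr4, rax" intercept.  CR4
 * is control register 4 and RAX is general register 0 (X86_GREG_xAX); the
 * 3-byte instruction length is an assumption for the example.
 */
#if 0
{
    VBOXSTRICTRC const rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 4 /*iCrReg*/, X86_GREG_xAX);
}
#endif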
10337
10338/**
10339 * Interface for HM and EM to read from a CRx register.
10340 *
10341 * @returns Strict VBox status code.
10342 * @param pVCpu The cross context virtual CPU structure.
10343 * @param cbInstr The instruction length in bytes.
10344 * @param iGReg The general purpose register number (destination).
10345 * @param iCrReg The control register number (source).
10346 *
10347 * @remarks In ring-0 not all of the state needs to be synced in.
10348 */
10349VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10350{
10351 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10352 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10353 | CPUMCTX_EXTRN_APIC_TPR);
10354 Assert(iCrReg < 16);
10355 Assert(iGReg < 16);
10356
10357 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10358 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10359 Assert(!pVCpu->iem.s.cActiveMappings);
10360 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10361}
10362
10363
10364/**
10365 * Interface for HM and EM to write to a DRx register.
10366 *
10367 * @returns Strict VBox status code.
10368 * @param pVCpu The cross context virtual CPU structure.
10369 * @param cbInstr The instruction length in bytes.
10370 * @param iDrReg The debug register number (destination).
10371 * @param iGReg The general purpose register number (source).
10372 *
10373 * @remarks In ring-0 not all of the state needs to be synced in.
10374 */
10375VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10376{
10377 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10378 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10379 Assert(iDrReg < 8);
10380 Assert(iGReg < 16);
10381
10382 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10383 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10384 Assert(!pVCpu->iem.s.cActiveMappings);
10385 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10386}
10387
10388
10389/**
10390 * Interface for HM and EM to read from a DRx register.
10391 *
10392 * @returns Strict VBox status code.
10393 * @param pVCpu The cross context virtual CPU structure.
10394 * @param cbInstr The instruction length in bytes.
10395 * @param iGReg The general purpose register number (destination).
10396 * @param iDrReg The debug register number (source).
10397 *
10398 * @remarks In ring-0 not all of the state needs to be synced in.
10399 */
10400VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10401{
10402 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10403 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10404 Assert(iDrReg < 8);
10405 Assert(iGReg < 16);
10406
10407 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10408 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10409 Assert(!pVCpu->iem.s.cActiveMappings);
10410 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10411}
10412
10413
10414/**
10415 * Interface for HM and EM to clear the CR0[TS] bit.
10416 *
10417 * @returns Strict VBox status code.
10418 * @param pVCpu The cross context virtual CPU structure.
10419 * @param cbInstr The instruction length in bytes.
10420 *
10421 * @remarks In ring-0 not all of the state needs to be synced in.
10422 */
10423VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10424{
10425 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10426
10427 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10428 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10429 Assert(!pVCpu->iem.s.cActiveMappings);
10430 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10431}
10432
10433
10434/**
10435 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10436 *
10437 * @returns Strict VBox status code.
10438 * @param pVCpu The cross context virtual CPU structure.
10439 * @param cbInstr The instruction length in bytes.
10440 * @param uValue The value to load into CR0.
10441 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10442 * memory operand. Otherwise pass NIL_RTGCPTR.
10443 *
10444 * @remarks In ring-0 not all of the state needs to be synced in.
10445 */
10446VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10447{
10448 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10449
10450 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10451 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10452 Assert(!pVCpu->iem.s.cActiveMappings);
10453 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10454}
10455
10456
10457/**
10458 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10459 *
10460 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10461 *
10462 * @returns Strict VBox status code.
10463 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10464 * @param cbInstr The instruction length in bytes.
10465 * @remarks In ring-0 not all of the state needs to be synced in.
10466 * @thread EMT(pVCpu)
10467 */
10468VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10469{
10470 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10471
10472 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10473 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10474 Assert(!pVCpu->iem.s.cActiveMappings);
10475 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10476}
10477
10478
10479/**
10480 * Interface for HM and EM to emulate the WBINVD instruction.
10481 *
10482 * @returns Strict VBox status code.
10483 * @param pVCpu The cross context virtual CPU structure.
10484 * @param cbInstr The instruction length in bytes.
10485 *
10486 * @remarks In ring-0 not all of the state needs to be synced in.
10487 */
10488VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10489{
10490 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10491
10492 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10493 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10494 Assert(!pVCpu->iem.s.cActiveMappings);
10495 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10496}
10497
10498
10499/**
10500 * Interface for HM and EM to emulate the INVD instruction.
10501 *
10502 * @returns Strict VBox status code.
10503 * @param pVCpu The cross context virtual CPU structure.
10504 * @param cbInstr The instruction length in bytes.
10505 *
10506 * @remarks In ring-0 not all of the state needs to be synced in.
10507 */
10508VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10509{
10510 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10511
10512 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10513 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10514 Assert(!pVCpu->iem.s.cActiveMappings);
10515 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10516}
10517
10518
10519/**
10520 * Interface for HM and EM to emulate the INVLPG instruction.
10521 *
10522 * @returns Strict VBox status code.
10523 * @retval VINF_PGM_SYNC_CR3
10524 *
10525 * @param pVCpu The cross context virtual CPU structure.
10526 * @param cbInstr The instruction length in bytes.
10527 * @param GCPtrPage The effective address of the page to invalidate.
10528 *
10529 * @remarks In ring-0 not all of the state needs to be synced in.
10530 */
10531VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10532{
10533 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10534
10535 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10536 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10537 Assert(!pVCpu->iem.s.cActiveMappings);
10538 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10539}
10540
10541
10542/**
10543 * Interface for HM and EM to emulate the INVPCID instruction.
10544 *
10545 * @returns Strict VBox status code.
10546 * @retval VINF_PGM_SYNC_CR3
10547 *
10548 * @param pVCpu The cross context virtual CPU structure.
10549 * @param cbInstr The instruction length in bytes.
10550 * @param iEffSeg The effective segment register.
10551 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10552 * @param uType The invalidation type.
10553 *
10554 * @remarks In ring-0 not all of the state needs to be synced in.
10555 */
10556VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10557 uint64_t uType)
10558{
10559 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10560
10561 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10562 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10563 Assert(!pVCpu->iem.s.cActiveMappings);
10564 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10565}
10566
10567
10568/**
10569 * Interface for HM and EM to emulate the CPUID instruction.
10570 *
10571 * @returns Strict VBox status code.
10572 *
10573 * @param pVCpu The cross context virtual CPU structure.
10574 * @param cbInstr The instruction length in bytes.
10575 *
10576 * @remarks Not all of the state needs to be synced in, just the usual set plus RAX and RCX.
10577 */
10578VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10579{
10580 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10581 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10582
10583 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10584 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10585 Assert(!pVCpu->iem.s.cActiveMappings);
10586 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10587}
10588
10589
10590/**
10591 * Interface for HM and EM to emulate the RDPMC instruction.
10592 *
10593 * @returns Strict VBox status code.
10594 *
10595 * @param pVCpu The cross context virtual CPU structure.
10596 * @param cbInstr The instruction length in bytes.
10597 *
10598 * @remarks Not all of the state needs to be synced in.
10599 */
10600VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10601{
10602 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10603 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10604
10605 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10606 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10607 Assert(!pVCpu->iem.s.cActiveMappings);
10608 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10609}
10610
10611
10612/**
10613 * Interface for HM and EM to emulate the RDTSC instruction.
10614 *
10615 * @returns Strict VBox status code.
10616 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10617 *
10618 * @param pVCpu The cross context virtual CPU structure.
10619 * @param cbInstr The instruction length in bytes.
10620 *
10621 * @remarks Not all of the state needs to be synced in.
10622 */
10623VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10624{
10625 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10626 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10627
10628 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10629 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10630 Assert(!pVCpu->iem.s.cActiveMappings);
10631 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10632}
10633
10634
10635/**
10636 * Interface for HM and EM to emulate the RDTSCP instruction.
10637 *
10638 * @returns Strict VBox status code.
10639 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10640 *
10641 * @param pVCpu The cross context virtual CPU structure.
10642 * @param cbInstr The instruction length in bytes.
10643 *
10644 * @remarks Not all of the state needs to be synced in. It is recommended
10645 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10646 */
10647VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10648{
10649 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10650 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10651
10652 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10653 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10654 Assert(!pVCpu->iem.s.cActiveMappings);
10655 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10656}
10657
10658
10659/**
10660 * Interface for HM and EM to emulate the RDMSR instruction.
10661 *
10662 * @returns Strict VBox status code.
10663 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10664 *
10665 * @param pVCpu The cross context virtual CPU structure.
10666 * @param cbInstr The instruction length in bytes.
10667 *
10668 * @remarks Not all of the state needs to be synced in. Requires RCX and
10669 * (currently) all MSRs.
10670 */
10671VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10672{
10673 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10674 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10675
10676 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10677 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10678 Assert(!pVCpu->iem.s.cActiveMappings);
10679 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10680}
10681
10682
10683/**
10684 * Interface for HM and EM to emulate the WRMSR instruction.
10685 *
10686 * @returns Strict VBox status code.
10687 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10688 *
10689 * @param pVCpu The cross context virtual CPU structure.
10690 * @param cbInstr The instruction length in bytes.
10691 *
10692 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10693 * and (currently) all MSRs.
10694 */
10695VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10696{
10697 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10698 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10699 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10700
10701 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10702 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10703 Assert(!pVCpu->iem.s.cActiveMappings);
10704 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10705}
10706
10707
10708/**
10709 * Interface for HM and EM to emulate the MONITOR instruction.
10710 *
10711 * @returns Strict VBox status code.
10712 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10713 *
10714 * @param pVCpu The cross context virtual CPU structure.
10715 * @param cbInstr The instruction length in bytes.
10716 *
10717 * @remarks Not all of the state needs to be synced in.
10718 * @remarks ASSUMES the default DS segment and that no segment override
10719 * prefixes are used.
10720 */
10721VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10722{
10723 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10724 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10725
10726 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10727 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10728 Assert(!pVCpu->iem.s.cActiveMappings);
10729 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10730}
10731
10732
10733/**
10734 * Interface for HM and EM to emulate the MWAIT instruction.
10735 *
10736 * @returns Strict VBox status code.
10737 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10738 *
10739 * @param pVCpu The cross context virtual CPU structure.
10740 * @param cbInstr The instruction length in bytes.
10741 *
10742 * @remarks Not all of the state needs to be synced in.
10743 */
10744VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10745{
10746 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10747 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10748
10749 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10750 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10751 Assert(!pVCpu->iem.s.cActiveMappings);
10752 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10753}
10754
10755
10756/**
10757 * Interface for HM and EM to emulate the HLT instruction.
10758 *
10759 * @returns Strict VBox status code.
10760 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10761 *
10762 * @param pVCpu The cross context virtual CPU structure.
10763 * @param cbInstr The instruction length in bytes.
10764 *
10765 * @remarks Not all of the state needs to be synced in.
10766 */
10767VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10768{
10769 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10770
10771 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10772 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10773 Assert(!pVCpu->iem.s.cActiveMappings);
10774 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10775}
10776
10777
10778/**
10779 * Checks if IEM is in the process of delivering an event (interrupt or
10780 * exception).
10781 *
10782 * @returns true if we're in the process of raising an interrupt or exception,
10783 * false otherwise.
10784 * @param pVCpu The cross context virtual CPU structure.
10785 * @param puVector Where to store the vector associated with the
10786 * currently delivered event, optional.
10787 * @param pfFlags Where to store the event delivery flags (see
10788 * IEM_XCPT_FLAGS_XXX), optional.
10789 * @param puErr Where to store the error code associated with the
10790 * event, optional.
10791 * @param puCr2 Where to store the CR2 associated with the event,
10792 * optional.
10793 * @remarks The caller should check the flags to determine if the error code and
10794 * CR2 are valid for the event.
10795 */
10796VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10797{
10798 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10799 if (fRaisingXcpt)
10800 {
10801 if (puVector)
10802 *puVector = pVCpu->iem.s.uCurXcpt;
10803 if (pfFlags)
10804 *pfFlags = pVCpu->iem.s.fCurXcpt;
10805 if (puErr)
10806 *puErr = pVCpu->iem.s.uCurXcptErr;
10807 if (puCr2)
10808 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10809 }
10810 return fRaisingXcpt;
10811}
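
/*
 * Illustrative usage sketch (hypothetical caller; assumes the
 * IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 names from the IEM_XCPT_FLAGS_XXX
 * set): check the returned flags before trusting the error code and CR2
 * values, as the remarks above require.
 *
 *     uint8_t uVector; uint32_t fFlags, uErr; uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *     {
 *         Log(("Delivering vector %#x\n", uVector));
 *         if (fFlags & IEM_XCPT_FLAGS_ERR)
 *             Log(("  error code %#x\n", uErr));
 *         if (fFlags & IEM_XCPT_FLAGS_CR2)
 *             Log(("  CR2 %RX64\n", uCr2));
 *     }
 */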
10812
10813#ifdef IN_RING3
10814
10815/**
10816 * Handles the unlikely and probably fatal merge cases.
10817 *
10818 * @returns Merged status code.
10819 * @param rcStrict Current EM status code.
10820 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10821 * with @a rcStrict.
10822 * @param iMemMap The memory mapping index. For error reporting only.
10823 * @param pVCpu The cross context virtual CPU structure of the calling
10824 * thread, for error reporting only.
10825 */
10826DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10827 unsigned iMemMap, PVMCPUCC pVCpu)
10828{
10829 if (RT_FAILURE_NP(rcStrict))
10830 return rcStrict;
10831
10832 if (RT_FAILURE_NP(rcStrictCommit))
10833 return rcStrictCommit;
10834
10835 if (rcStrict == rcStrictCommit)
10836 return rcStrictCommit;
10837
10838 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10839 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10840 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10841 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10842 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10843 return VERR_IOM_FF_STATUS_IPE;
10844}
10845
10846
10847/**
10848 * Helper for IOMR3ProcessForceFlag.
10849 *
10850 * @returns Merged status code.
10851 * @param rcStrict Current EM status code.
10852 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10853 * with @a rcStrict.
10854 * @param iMemMap The memory mapping index. For error reporting only.
10855 * @param pVCpu The cross context virtual CPU structure of the calling
10856 * thread, for error reporting only.
10857 */
10858DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10859{
10860 /* Simple. */
10861 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10862 return rcStrictCommit;
10863
10864 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10865 return rcStrict;
10866
10867 /* EM scheduling status codes. */
10868 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10869 && rcStrict <= VINF_EM_LAST))
10870 {
10871 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10872 && rcStrictCommit <= VINF_EM_LAST))
10873 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10874 }
10875
10876 /* Unlikely */
10877 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10878}
10879
10880
10881/**
10882 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10883 *
10884 * @returns Merge between @a rcStrict and what the commit operation returned.
10885 * @param pVM The cross context VM structure.
10886 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10887 * @param rcStrict The status code returned by ring-0 or raw-mode.
10888 */
10889VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10890{
10891 /*
10892 * Reset the pending commit.
10893 */
10894 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10895 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10896 ("%#x %#x %#x\n",
10897 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10898 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10899
10900 /*
10901 * Commit the pending bounce buffers (usually just one).
10902 */
10903 unsigned cBufs = 0;
10904 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10905 while (iMemMap-- > 0)
10906 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10907 {
10908 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10909 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10910 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10911
10912 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10913 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10914 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10915
10916 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10917 {
10918 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10919 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10920 pbBuf,
10921 cbFirst,
10922 PGMACCESSORIGIN_IEM);
10923 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10924 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10925 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10926 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10927 }
10928
10929 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10930 {
10931 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10933 pbBuf + cbFirst,
10934 cbSecond,
10935 PGMACCESSORIGIN_IEM);
10936 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10937 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10938 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10939 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10940 }
10941 cBufs++;
10942 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10943 }
10944
10945 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10946 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10947 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10948 pVCpu->iem.s.cActiveMappings = 0;
10949 return rcStrict;
10950}
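
/*
 * Illustrative usage sketch (hypothetical call site): the ring-3 force-flag
 * processing code would invoke this when it finds VMCPU_FF_IEM pending:
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */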
10951
10952#endif /* IN_RING3 */
10953