VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@105318

Last change on this file since 105318 was 105291, checked in by vboxsync, 7 months ago

VMM/IEM: Added iemMemFetchDataU32NoAc and iemMemFetchDataU64NoAc (with variations). bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 458.8 KB
1/* $Id: IEMAll.cpp 105291 2024-07-12 10:01:53Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
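/*
 * Example of enabling the above levels at runtime through the IPRT log group
 * string (the exact group and flag syntax here is an assumption; see the IPRT
 * logging documentation for the authoritative format):
 *
 *      VBOX_LOG="+iem.e.l3.l4"        - IEM level 1, 3 and 4 output.
 *      VBOX_LOG="+iem_mem.e.l5.l6"    - IEM_MEM writes and write fallbacks.
 */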
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
215 * Helper for invalidating the data TLB entries for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 /** @todo do large page accounting */ \
225 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
226 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
227 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
228 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
229 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
230 } while (0)
231#else
232# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
233#endif
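 /* Note: each TLB lookup uses an even/odd entry pair - the even slot is matched
    against uTlbRevision (non-global pages) and the odd slot against
    uTlbRevisionGlobal (global pages).  The macro above therefore clears whichever
    of the two slots currently matches, forcing a fresh TLB load on the next
    access to a page with an armed data breakpoint. */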
234
235 /*
236 * Process guest breakpoints.
237 */
238#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
239 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
240 { \
241 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
242 { \
243 case X86_DR7_RW_EO: \
244 fExec |= IEM_F_PENDING_BRK_INSTR; \
245 break; \
246 case X86_DR7_RW_WO: \
247 case X86_DR7_RW_RW: \
248 fExec |= IEM_F_PENDING_BRK_DATA; \
249 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
250 break; \
251 case X86_DR7_RW_IO: \
252 fExec |= IEM_F_PENDING_BRK_X86_IO; \
253 break; \
254 } \
255 } \
256 } while (0)
257
258 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
259 if (fGstDr7 & X86_DR7_ENABLED_MASK)
260 {
261/** @todo extract more details here to simplify matching later. */
262#ifdef IEM_WITH_DATA_TLB
263 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
264#endif
265 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
266 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
267 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
268 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
269 }
270
271 /*
272 * Process hypervisor breakpoints.
273 */
274 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
275 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
276 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
277 {
278/** @todo extract more details here to simplify matching later. */
279 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
282 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
283 }
284
285 return fExec;
286}
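/*
 * Usage sketch (an assumption - the real wrapper is presumably defined inline
 * elsewhere): callers are expected to go through iemCalcExecDbgFlags(), which
 * only falls back to this slow path when breakpoints may actually be armed:
 *
 *      uint32_t const fBrkFlags = iemCalcExecDbgFlags(pVCpu);
 *      // fBrkFlags is zero or a combination of IEM_F_PENDING_BRK_XXX bits.
 */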
287
288
289/**
290 * Initializes the decoder state.
291 *
292 * iemReInitDecoder is mostly a copy of this function.
293 *
294 * @param pVCpu The cross context virtual CPU structure of the
295 * calling thread.
296 * @param fExecOpts Optional execution flags:
297 * - IEM_F_BYPASS_HANDLERS
298 * - IEM_F_X86_DISREGARD_LOCK
299 */
300DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
301{
302 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
312
313 /* Execution state: */
314 uint32_t fExec;
315 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
316
317 /* Decoder state: */
318 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
319 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
320 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
321 {
322 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
323 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
324 }
325 else
326 {
327 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
328 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
329 }
330 pVCpu->iem.s.fPrefixes = 0;
331 pVCpu->iem.s.uRexReg = 0;
332 pVCpu->iem.s.uRexB = 0;
333 pVCpu->iem.s.uRexIndex = 0;
334 pVCpu->iem.s.idxPrefix = 0;
335 pVCpu->iem.s.uVex3rdReg = 0;
336 pVCpu->iem.s.uVexLength = 0;
337 pVCpu->iem.s.fEvexStuff = 0;
338 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
339#ifdef IEM_WITH_CODE_TLB
340 pVCpu->iem.s.pbInstrBuf = NULL;
341 pVCpu->iem.s.offInstrNextByte = 0;
342 pVCpu->iem.s.offCurInstrStart = 0;
343# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
344 pVCpu->iem.s.offOpcode = 0;
345# endif
346# ifdef VBOX_STRICT
347 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
348 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
349 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
350 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
351# endif
352#else
353 pVCpu->iem.s.offOpcode = 0;
354 pVCpu->iem.s.cbOpcode = 0;
355#endif
356 pVCpu->iem.s.offModRm = 0;
357 pVCpu->iem.s.cActiveMappings = 0;
358 pVCpu->iem.s.iNextMapping = 0;
359 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
360
361#ifdef DBGFTRACE_ENABLED
362 switch (IEM_GET_CPU_MODE(pVCpu))
363 {
364 case IEMMODE_64BIT:
365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
366 break;
367 case IEMMODE_32BIT:
368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
369 break;
370 case IEMMODE_16BIT:
371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
372 break;
373 }
374#endif
375}
376
377
378/**
379 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
380 *
381 * This is mostly a copy of iemInitDecoder.
382 *
383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
384 */
385DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
386{
387 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
396
397 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
398 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
399 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
400
401 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
402 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
403 pVCpu->iem.s.enmEffAddrMode = enmMode;
404 if (enmMode != IEMMODE_64BIT)
405 {
406 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
407 pVCpu->iem.s.enmEffOpSize = enmMode;
408 }
409 else
410 {
411 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
412 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
413 }
414 pVCpu->iem.s.fPrefixes = 0;
415 pVCpu->iem.s.uRexReg = 0;
416 pVCpu->iem.s.uRexB = 0;
417 pVCpu->iem.s.uRexIndex = 0;
418 pVCpu->iem.s.idxPrefix = 0;
419 pVCpu->iem.s.uVex3rdReg = 0;
420 pVCpu->iem.s.uVexLength = 0;
421 pVCpu->iem.s.fEvexStuff = 0;
422 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
423#ifdef IEM_WITH_CODE_TLB
424 if (pVCpu->iem.s.pbInstrBuf)
425 {
426 uint64_t off = (enmMode == IEMMODE_64BIT
427 ? pVCpu->cpum.GstCtx.rip
428 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
429 - pVCpu->iem.s.uInstrBufPc;
430 if (off < pVCpu->iem.s.cbInstrBufTotal)
431 {
432 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
433 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
434 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
435 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
436 else
437 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
438 }
439 else
440 {
441 pVCpu->iem.s.pbInstrBuf = NULL;
442 pVCpu->iem.s.offInstrNextByte = 0;
443 pVCpu->iem.s.offCurInstrStart = 0;
444 pVCpu->iem.s.cbInstrBuf = 0;
445 pVCpu->iem.s.cbInstrBufTotal = 0;
446 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
447 }
448 }
449 else
450 {
451 pVCpu->iem.s.offInstrNextByte = 0;
452 pVCpu->iem.s.offCurInstrStart = 0;
453 pVCpu->iem.s.cbInstrBuf = 0;
454 pVCpu->iem.s.cbInstrBufTotal = 0;
455# ifdef VBOX_STRICT
456 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
457# endif
458 }
459# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
460 pVCpu->iem.s.offOpcode = 0;
461# endif
462#else /* !IEM_WITH_CODE_TLB */
463 pVCpu->iem.s.cbOpcode = 0;
464 pVCpu->iem.s.offOpcode = 0;
465#endif /* !IEM_WITH_CODE_TLB */
466 pVCpu->iem.s.offModRm = 0;
467 Assert(pVCpu->iem.s.cActiveMappings == 0);
468 pVCpu->iem.s.iNextMapping = 0;
469 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
470 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
471
472#ifdef DBGFTRACE_ENABLED
473 switch (enmMode)
474 {
475 case IEMMODE_64BIT:
476 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
477 break;
478 case IEMMODE_32BIT:
479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
480 break;
481 case IEMMODE_16BIT:
482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
483 break;
484 }
485#endif
486}
487
488
489
490/**
491 * Prefetches opcodes the first time when starting execution.
492 *
493 * @returns Strict VBox status code.
494 * @param pVCpu The cross context virtual CPU structure of the
495 * calling thread.
496 * @param fExecOpts Optional execution flags:
497 * - IEM_F_BYPASS_HANDLERS
498 * - IEM_F_X86_DISREGARD_LOCK
499 */
500static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
501{
502 iemInitDecoder(pVCpu, fExecOpts);
503
504#ifndef IEM_WITH_CODE_TLB
505 /*
506 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
507 *
508 * First translate CS:rIP to a physical address.
509 *
510 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
511 * all relevant bytes from the first page, as it ASSUMES it's only ever
512 * called for dealing with CS.LIM, page crossing and instructions that
513 * are too long.
514 */
515 uint32_t cbToTryRead;
516 RTGCPTR GCPtrPC;
517 if (IEM_IS_64BIT_CODE(pVCpu))
518 {
519 cbToTryRead = GUEST_PAGE_SIZE;
520 GCPtrPC = pVCpu->cpum.GstCtx.rip;
521 if (IEM_IS_CANONICAL(GCPtrPC))
522 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
523 else
524 return iemRaiseGeneralProtectionFault0(pVCpu);
525 }
526 else
527 {
528 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
529 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
530 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
531 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
532 else
533 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
534 if (cbToTryRead) { /* likely */ }
535 else /* overflowed */
536 {
537 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
538 cbToTryRead = UINT32_MAX;
539 }
540 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
541 Assert(GCPtrPC <= UINT32_MAX);
542 }
543
544 PGMPTWALKFAST WalkFast;
545 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
546 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
547 &WalkFast);
548 if (RT_SUCCESS(rc))
549 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
550 else
551 {
552 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
553# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
554/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
555 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
556 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
557 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
558# endif
559 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
560 }
561#if 0
562 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
566# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
567/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
568# error completely wrong
569 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
570 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
571# endif
572 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
573 }
574 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
575 else
576 {
577 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
578# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
579/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
580# error completely wrong.
581 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
582 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
583# endif
584 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
585 }
586#else
587 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
588 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
589#endif
590 RTGCPHYS const GCPhys = WalkFast.GCPhys;
591
592 /*
593 * Read the bytes at this address.
594 */
595 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
596 if (cbToTryRead > cbLeftOnPage)
597 cbToTryRead = cbLeftOnPage;
598 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
599 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
600
601 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
602 {
603 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
604 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
605 { /* likely */ }
606 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
607 {
608 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
609 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
610 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
611 }
612 else
613 {
614 Log((RT_SUCCESS(rcStrict)
615 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
616 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
617 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
618 return rcStrict;
619 }
620 }
621 else
622 {
623 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
624 if (RT_SUCCESS(rc))
625 { /* likely */ }
626 else
627 {
628 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
629 GCPtrPC, GCPhys, cbToTryRead, rc));
630 return rc;
631 }
632 }
633 pVCpu->iem.s.cbOpcode = cbToTryRead;
634#endif /* !IEM_WITH_CODE_TLB */
635 return VINF_SUCCESS;
636}
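/*
 * Note: in builds without IEM_WITH_CODE_TLB the code above prefetches as many
 * opcode bytes as fit in abOpcode from the current page; iemOpcodeFetchMoreBytes()
 * later tops the buffer up when an instruction needs bytes beyond what was
 * fetched here (page crossings, overly long instructions).  TLB-enabled builds
 * instead fetch on demand via iemOpcodeFetchBytesJmp().
 */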
637
638
639#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
640/**
641 * Helper for doing large page accounting at TLB load time.
642 */
643template<bool const a_fGlobal>
644DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
645{
646 if (a_fGlobal)
647 pTlb->cTlbGlobalLargePageCurLoads++;
648 else
649 pTlb->cTlbNonGlobalLargePageCurLoads++;
650
651 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
652 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
653 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
654 ? &pTlb->GlobalLargePageRange
655 : &pTlb->NonGlobalLargePageRange;
656 uTagNoRev &= ~(RTGCPTR)fMask;
657 if (uTagNoRev < pRange->uFirstTag)
658 pRange->uFirstTag = uTagNoRev;
659
660 uTagNoRev |= fMask;
661 if (uTagNoRev > pRange->uLastTag)
662 pRange->uLastTag = uTagNoRev;
663}
664#endif
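/*
 * The [uFirstTag, uLastTag] ranges maintained above are a conservative summary
 * of where large pages have been loaded into the TLB.  iemTlbInvalidatePageWorker()
 * consults them to decide whether flushing a single linear address must also
 * scan for sibling entries belonging to the same 2MB/4MB page.
 */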
665
666
667#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
668/**
669 * Worker for iemTlbInvalidateAll.
670 */
671template<bool a_fGlobal>
672DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
673{
674 if (!a_fGlobal)
675 pTlb->cTlsFlushes++;
676 else
677 pTlb->cTlsGlobalFlushes++;
678
679 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
680 if (RT_LIKELY(pTlb->uTlbRevision != 0))
681 { /* very likely */ }
682 else
683 {
684 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
685 pTlb->cTlbRevisionRollovers++;
686 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
687 while (i-- > 0)
688 pTlb->aEntries[i * 2].uTag = 0;
689 }
690
691 pTlb->cTlbNonGlobalLargePageCurLoads = 0;
692 pTlb->NonGlobalLargePageRange.uLastTag = 0;
693 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
694
695 if (a_fGlobal)
696 {
697 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
698 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
699 { /* very likely */ }
700 else
701 {
702 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
703 pTlb->cTlbRevisionRollovers++;
704 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
705 while (i-- > 0)
706 pTlb->aEntries[i * 2 + 1].uTag = 0;
707 }
708
709 pTlb->cTlbGlobalLargePageCurLoads = 0;
710 pTlb->GlobalLargePageRange.uLastTag = 0;
711 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
712 }
713}
714#endif
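/*
 * Note on the revision scheme used above: entries are tagged at load time with
 * the page tag OR'ed with the current revision, and lookups test
 *
 *      pTlbe->uTag == (uTagNoRev | pTlb->uTlbRevision)
 *
 * so bumping the revision invalidates every existing entry without touching the
 * array.  Only when the revision counter wraps around do the tags have to be
 * zeroed, which is the (rare) rollover path handled above.
 */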
715
716
717/**
718 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
719 */
720template<bool a_fGlobal>
721DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
722{
723#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
724 Log10(("IEMTlbInvalidateAll\n"));
725
726# ifdef IEM_WITH_CODE_TLB
727 pVCpu->iem.s.cbInstrBufTotal = 0;
728 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
729# endif
730
731# ifdef IEM_WITH_DATA_TLB
732 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
733# endif
734#else
735 RT_NOREF(pVCpu);
736#endif
737}
738
739
740/**
741 * Invalidates the non-global IEM TLB entries.
742 *
743 * This is called internally as well as by PGM when moving GC mappings.
744 *
745 * @param pVCpu The cross context virtual CPU structure of the calling
746 * thread.
747 */
748VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
749{
750 iemTlbInvalidateAll<false>(pVCpu);
751}
752
753
754/**
755 * Invalidates all the IEM TLB entries.
756 *
757 * This is called internally as well as by PGM when moving GC mappings.
758 *
759 * @param pVCpu The cross context virtual CPU structure of the calling
760 * thread.
761 */
762VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
763{
764 iemTlbInvalidateAll<true>(pVCpu);
765}
766
767
768#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
769
770template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
771DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
772{
773 /* Combine TAG values with the TLB revisions. */
774 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
775 if (a_fNonGlobal)
776 GCPtrTag |= pTlb->uTlbRevision;
777
778 /* Set up the scan. */
779 bool const fPartialScan = IEMTLB_ENTRY_COUNT >= (a_f2MbLargePage ? 512 : 1024);
780 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
781 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + (a_f2MbLargePage ? 512 : 1024) : IEMTLB_ENTRY_COUNT;
782 RTGCPTR const GCPtrTagMask = fPartialScan
783 ? ~(RTGCPTR)0
784 : ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK
785 & ~(RTGCPTR)( ( RT_BIT_64((a_f2MbLargePage ? 9 : 10) - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO)
786 - 1U)
787 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO);
788
789 /*
790 * Do the scanning.
791 */
792 for (idxEven = 0; idxEven < idxEvenEnd; idxEven += 2)
793 {
794 if (a_fNonGlobal)
795 {
796 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag)
797 {
798 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
799 {
800 pTlb->aEntries[idxEven].uTag = 0;
801 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
802 pVCpu->iem.s.cbInstrBufTotal = 0;
803 }
804 }
805 GCPtrTag++;
806 }
807
808 if (a_fGlobal)
809 {
810 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob)
811 {
812 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
813 {
814 pTlb->aEntries[idxEven + 1].uTag = 0;
815 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
816 pVCpu->iem.s.cbInstrBufTotal = 0;
817 }
818 }
819 GCPtrTagGlob++;
820 }
821 }
822
823}
824
825template<bool const a_fDataTlb, bool const a_f2MbLargePage>
826DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
827{
828 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
829
830 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
831 if ( pTlb->GlobalLargePageRange.uFirstTag >= GCPtrTag
832 && pTlb->GlobalLargePageRange.uLastTag <= GCPtrTag)
833 {
834 if ( pTlb->NonGlobalLargePageRange.uFirstTag < GCPtrTag
835 || pTlb->NonGlobalLargePageRange.uLastTag > GCPtrTag)
836 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
837 else
838 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
839 }
840 else if ( pTlb->NonGlobalLargePageRange.uFirstTag < GCPtrTag
841 || pTlb->NonGlobalLargePageRange.uLastTag > GCPtrTag)
842 { /* Large pages aren't as likely in the non-global TLB half. */ }
843 else
844 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
845}
846
847template<bool const a_fDataTlb>
848DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven)
849{
850 /*
851 * Flush the entry pair.
852 */
853 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
854 {
855 pTlb->aEntries[idxEven].uTag = 0;
856 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
857 pVCpu->iem.s.cbInstrBufTotal = 0;
858 }
859 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
860 {
861 pTlb->aEntries[idxEven + 1].uTag = 0;
862 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
863 pVCpu->iem.s.cbInstrBufTotal = 0;
864 }
865
866 /*
867 * If there are (or has been) large pages in the TLB, we must check if the
868 * address being flushed may involve one of those, as then we'd have to
869 * scan for entries relating to the same page and flush those as well.
870 */
871# if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */
872 if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
873# else
874 if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
875# endif
876 {
877 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
878 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
879 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
880 else
881 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
882 }
883}
884
885#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */
886
887/**
888 * Invalidates a page in the TLBs.
889 *
890 * @param pVCpu The cross context virtual CPU structure of the calling
891 * thread.
892 * @param GCPtr The address of the page to invalidate
893 * @thread EMT(pVCpu)
894 */
895VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
896{
897#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
898 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
899 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
900 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
901 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
902
903# ifdef IEM_WITH_CODE_TLB
904 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
905# endif
906# ifdef IEM_WITH_DATA_TLB
907 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
908# endif
909#else
910 NOREF(pVCpu); NOREF(GCPtr);
911#endif
912}
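/*
 * Minimal usage sketch (the assumption here is about the caller, e.g. PGM's
 * INVLPG / PTE-change handling; the actual call sites are not in this file):
 *
 *      IEMTlbInvalidatePage(pVCpu, GCPtrPage);  // drop IEM's code/data TLB entries for the page
 */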
913
914
915#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
916/**
917 * Invalidates both TLBs in a slow fashion following a physical revision rollover.
918 *
919 * Worker for IEMTlbInvalidateAllPhysical,
920 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
921 * iemMemMapJmp and others.
922 *
923 * @thread EMT(pVCpu)
924 */
925static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
926{
927 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
928 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
929 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
930
931 unsigned i;
932# ifdef IEM_WITH_CODE_TLB
933 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
934 while (i-- > 0)
935 {
936 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
937 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
938 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
939 }
940 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
941 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
942# endif
943# ifdef IEM_WITH_DATA_TLB
944 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
945 while (i-- > 0)
946 {
947 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
948 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
949 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
950 }
951 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
952 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
953# endif
954
955}
956#endif
957
958
959/**
960 * Invalidates the host physical aspects of the IEM TLBs.
961 *
962 * This is called internally as well as by PGM when moving GC mappings.
963 *
964 * @param pVCpu The cross context virtual CPU structure of the calling
965 * thread.
966 * @note Currently not used.
967 */
968VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
969{
970#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
971 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
972 Log10(("IEMTlbInvalidateAllPhysical\n"));
973
974# ifdef IEM_WITH_CODE_TLB
975 pVCpu->iem.s.cbInstrBufTotal = 0;
976# endif
977 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
978 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
979 {
980 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
981 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
982 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
983 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
984 }
985 else
986 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
987#else
988 NOREF(pVCpu);
989#endif
990}
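/*
 * Note: this mirrors the virtual-address revision trick - bumping uTlbPhysRev
 * makes the cached physical info (pbMappingR3 and the IEMTLBE_F_PG_XXX bits) of
 * every entry stale, so it is re-resolved via PGMPhysIemGCPhys2PtrNoLock on the
 * next use.  IEMTlbInvalidateAllPhysicalSlow() handles the rollover case where
 * the flags must be cleared explicitly.
 */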
991
992
993/**
994 * Invalidates the host physical aspects of the IEM TLBs.
995 *
996 * This is called internally as well as by PGM when moving GC mappings.
997 *
998 * @param pVM The cross context VM structure.
999 * @param idCpuCaller The ID of the calling EMT if available to the caller,
1000 * otherwise NIL_VMCPUID.
1001 * @param enmReason The reason we're called.
1002 *
1003 * @remarks Caller holds the PGM lock.
1004 */
1005VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
1006{
1007#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1008 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
1009 if (pVCpuCaller)
1010 VMCPU_ASSERT_EMT(pVCpuCaller);
1011 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
1012
1013 VMCC_FOR_EACH_VMCPU(pVM)
1014 {
1015# ifdef IEM_WITH_CODE_TLB
1016 if (pVCpuCaller == pVCpu)
1017 pVCpu->iem.s.cbInstrBufTotal = 0;
1018# endif
1019
1020 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
1021 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
1022 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
1023 { /* likely */}
1024 else if (pVCpuCaller != pVCpu)
1025 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
1026 else
1027 {
1028 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1029 continue;
1030 }
1031 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1032 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1033
1034 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1035 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1036 }
1037 VMCC_FOR_EACH_VMCPU_END(pVM);
1038
1039#else
1040 RT_NOREF(pVM, idCpuCaller, enmReason);
1041#endif
1042}
1043
1044
1045/**
1046 * Flushes the prefetch buffer, light version.
1047 */
1048void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
1049{
1050#ifndef IEM_WITH_CODE_TLB
1051 pVCpu->iem.s.cbOpcode = cbInstr;
1052#else
1053 RT_NOREF(pVCpu, cbInstr);
1054#endif
1055}
1056
1057
1058/**
1059 * Flushes the prefetch buffer, heavy version.
1060 */
1061void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
1062{
1063#ifndef IEM_WITH_CODE_TLB
1064 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
1065#elif 1
1066 pVCpu->iem.s.cbInstrBufTotal = 0;
1067 RT_NOREF(cbInstr);
1068#else
1069 RT_NOREF(pVCpu, cbInstr);
1070#endif
1071}
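/*
 * Note: with the code TLB enabled the light variant is a no-op, while the heavy
 * variant zeroes cbInstrBufTotal so the next opcode fetch goes back through
 * iemOpcodeFetchBytesJmp() and re-establishes pbInstrBuf.  Without the code TLB
 * both simply reset cbOpcode to the given instruction length.
 */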
1072
1073
1074
1075#ifdef IEM_WITH_CODE_TLB
1076
1077/**
1078 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1079 * failure and jumping.
1080 *
1081 * We end up here for a number of reasons:
1082 * - pbInstrBuf isn't yet initialized.
1083 * - Advancing beyond the buffer boundary (e.g. cross page).
1084 * - Advancing beyond the CS segment limit.
1085 * - Fetching from non-mappable page (e.g. MMIO).
1086 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
1087 *
1088 * @param pVCpu The cross context virtual CPU structure of the
1089 * calling thread.
1090 * @param pvDst Where to return the bytes.
1091 * @param cbDst Number of bytes to read. A value of zero is
1092 * allowed for initializing pbInstrBuf (the
1093 * recompiler does this). In this case it is best
1094 * to set pbInstrBuf to NULL prior to the call.
1095 */
1096void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
1097{
1098# ifdef IN_RING3
1099 for (;;)
1100 {
1101 Assert(cbDst <= 8);
1102 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1103
1104 /*
1105 * We might have a partial buffer match, deal with that first to make the
1106 * rest simpler. This is the first part of the cross page/buffer case.
1107 */
1108 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1109 if (pbInstrBuf != NULL)
1110 {
1111 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
1112 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1113 if (offBuf < cbInstrBuf)
1114 {
1115 Assert(offBuf + cbDst > cbInstrBuf);
1116 uint32_t const cbCopy = cbInstrBuf - offBuf;
1117 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
1118
1119 cbDst -= cbCopy;
1120 pvDst = (uint8_t *)pvDst + cbCopy;
1121 offBuf += cbCopy;
1122 }
1123 }
1124
1125 /*
1126 * Check segment limit, figuring how much we're allowed to access at this point.
1127 *
1128 * We will fault immediately if RIP is past the segment limit / in non-canonical
1129 * territory. If we do continue, there are one or more bytes to read before we
1130 * end up in trouble and we need to do that first before faulting.
1131 */
1132 RTGCPTR GCPtrFirst;
1133 uint32_t cbMaxRead;
1134 if (IEM_IS_64BIT_CODE(pVCpu))
1135 {
1136 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1137 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1138 { /* likely */ }
1139 else
1140 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1141 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1142 }
1143 else
1144 {
1145 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1146 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1147 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1148 { /* likely */ }
1149 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1150 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1151 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1152 if (cbMaxRead != 0)
1153 { /* likely */ }
1154 else
1155 {
1156 /* Overflowed because address is 0 and limit is max. */
1157 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1158 cbMaxRead = X86_PAGE_SIZE;
1159 }
1160 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1161 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1162 if (cbMaxRead2 < cbMaxRead)
1163 cbMaxRead = cbMaxRead2;
1164 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1165 }
1166
1167 /*
1168 * Get the TLB entry for this piece of code.
1169 */
1170 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1171 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1172 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1173 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1174 {
1175 /* likely when executing lots of code, otherwise unlikely */
1176# ifdef IEM_WITH_TLB_STATISTICS
1177 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1178# endif
1179 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1180
1181 /* Check TLB page table level access flags. */
1182 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1183 {
1184 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1185 {
1186 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1187 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1188 }
1189 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1190 {
1191 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1192 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1193 }
1194 }
1195
1196 /* Look up the physical page info if necessary. */
1197 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1198 { /* not necessary */ }
1199 else
1200 {
1201 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1202 { /* likely */ }
1203 else
1204 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1205 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1206 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1207 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1208 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1209 }
1210 }
1211 else
1212 {
1213 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1214
1215 /* This page table walking will set A bits as required by the access while performing the walk.
1216 ASSUMES these are set when the address is translated rather than on commit... */
1217 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1218 PGMPTWALKFAST WalkFast;
1219 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1220 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1221 &WalkFast);
1222 if (RT_SUCCESS(rc))
1223 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1224 else
1225 {
1226#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1227 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1228 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1229#endif
1230 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1231 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1232 }
1233
1234 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1235 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1236 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1237 {
1238 pTlbe--;
1239 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1240 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1241 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1242 }
1243 else
1244 {
1245 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1246 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1247 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1248 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1249 }
1250 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1251 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1252 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1253 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1254 pTlbe->GCPhys = GCPhysPg;
1255 pTlbe->pbMappingR3 = NULL;
1256 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1257 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1258 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1259
1260 /* Resolve the physical address. */
1261 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1262 { /* likely */ }
1263 else
1264 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1265 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1266 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1267 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1268 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1269 }
1270
1271# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1272 /*
1273 * Try to do a direct read using the pbMappingR3 pointer.
1274 * Note! Do not recheck the physical TLB revision number here as we have the
1275 * wrong response to changes in the else case. If someone is updating
1276 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1277 * pretending we always won the race.
1278 */
1279 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1280 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1281 {
1282 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1283 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1284 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1285 {
1286 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1287 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1288 }
1289 else
1290 {
1291 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1292 if (cbInstr + (uint32_t)cbDst <= 15)
1293 {
1294 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1295 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1296 }
1297 else
1298 {
1299 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1300 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1301 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1302 }
1303 }
1304 if (cbDst <= cbMaxRead)
1305 {
1306 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1307 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1308
1309 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1310 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1311 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1312 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1313 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1314 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1315 else
1316 Assert(!pvDst);
1317 return;
1318 }
1319 pVCpu->iem.s.pbInstrBuf = NULL;
1320
1321 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1322 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1323 }
1324# else
1325# error "refactor as needed"
1326 /*
1327 * If there is no special read handling, we can read a bit more and
1328 * put it in the prefetch buffer.
1329 */
1330 if ( cbDst < cbMaxRead
1331 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1332 {
1333 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1334 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1335 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1336 { /* likely */ }
1337 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1338 {
1339 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1340 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1341 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1342 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1343 }
1344 else
1345 {
1346 Log((RT_SUCCESS(rcStrict)
1347 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1348 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1349 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1350 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1351 }
1352 }
1353# endif
1354 /*
1355 * Special read handling, so only read exactly what's needed.
1356 * This is a highly unlikely scenario.
1357 */
1358 else
1359 {
1360 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1361
1362 /* Check instruction length. */
1363 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1364 if (RT_LIKELY(cbInstr + cbDst <= 15))
1365 { /* likely */ }
1366 else
1367 {
1368 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1369 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1370 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1371 }
1372
1373 /* Do the reading. */
1374 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1375 if (cbToRead > 0)
1376 {
1377 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1378 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1379 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1380 { /* likely */ }
1381 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1382 {
1383 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1384 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1385 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1386 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1387 }
1388 else
1389 {
1390 Log((RT_SUCCESS(rcStrict)
1391 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1392 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1393 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1394 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1395 }
1396 }
1397
1398 /* Update the state and probably return. */
1399 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1400 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1401 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1402
1403 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1404 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1405 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1406 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1407 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1408 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1409 pVCpu->iem.s.pbInstrBuf = NULL;
1410 if (cbToRead == cbDst)
1411 return;
1412 Assert(cbToRead == cbMaxRead);
1413 }
1414
1415 /*
1416 * More to read, loop.
1417 */
1418 cbDst -= cbMaxRead;
1419 pvDst = (uint8_t *)pvDst + cbMaxRead;
1420 }
1421# else /* !IN_RING3 */
1422 RT_NOREF(pvDst, cbDst);
1423 if (pvDst || cbDst)
1424 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1425# endif /* !IN_RING3 */
1426}
1427
1428#else /* !IEM_WITH_CODE_TLB */
1429
1430/**
1431 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1432 * exception if it fails.
1433 *
1434 * @returns Strict VBox status code.
1435 * @param pVCpu The cross context virtual CPU structure of the
1436 * calling thread.
1437 * @param cbMin The minimum number of bytes relative to offOpcode
1438 * that must be read.
1439 */
1440VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1441{
1442 /*
1443 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1444 *
1445 * First translate CS:rIP to a physical address.
1446 */
1447 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1448 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1449 uint8_t const cbLeft = cbOpcode - offOpcode;
1450 Assert(cbLeft < cbMin);
1451 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1452
1453 uint32_t cbToTryRead;
1454 RTGCPTR GCPtrNext;
1455 if (IEM_IS_64BIT_CODE(pVCpu))
1456 {
1457 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1458 if (!IEM_IS_CANONICAL(GCPtrNext))
1459 return iemRaiseGeneralProtectionFault0(pVCpu);
1460 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1461 }
1462 else
1463 {
1464 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1465 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1466 GCPtrNext32 += cbOpcode;
1467 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1468 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1469 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1470 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1471 if (!cbToTryRead) /* overflowed */
1472 {
1473 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1474 cbToTryRead = UINT32_MAX;
1475 /** @todo check out wrapping around the code segment. */
1476 }
1477 if (cbToTryRead < cbMin - cbLeft)
1478 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1479 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1480
1481 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1482 if (cbToTryRead > cbLeftOnPage)
1483 cbToTryRead = cbLeftOnPage;
1484 }
1485
1486 /* Restrict to opcode buffer space.
1487
1488 We're making ASSUMPTIONS here based on work done previously in
1489 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1490 be fetched in case of an instruction crossing two pages. */
1491 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1492 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1493 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1494 { /* likely */ }
1495 else
1496 {
1497 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1498 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1499 return iemRaiseGeneralProtectionFault0(pVCpu);
1500 }
1501
1502 PGMPTWALKFAST WalkFast;
1503 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1504 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1505 &WalkFast);
1506 if (RT_SUCCESS(rc))
1507 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1508 else
1509 {
1510 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1511#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1512 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1513 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1514#endif
1515 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1516 }
1517 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1518 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1519
1520 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1521 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1522
1523 /*
1524 * Read the bytes at this address.
1525 *
1526 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1527 * and since PATM should only patch the start of an instruction there
1528 * should be no need to check again here.
1529 */
1530 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1531 {
1532 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1533 cbToTryRead, PGMACCESSORIGIN_IEM);
1534 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1535 { /* likely */ }
1536 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1537 {
1538 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1539             GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1540 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1541 }
1542 else
1543 {
1544 Log((RT_SUCCESS(rcStrict)
1545 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1546 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1547                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1548 return rcStrict;
1549 }
1550 }
1551 else
1552 {
1553 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1554 if (RT_SUCCESS(rc))
1555 { /* likely */ }
1556 else
1557 {
1558 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1559 return rc;
1560 }
1561 }
1562 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1563 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1564
1565 return VINF_SUCCESS;
1566}
1567
1568#endif /* !IEM_WITH_CODE_TLB */
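
/*
 * Illustrative sketch (editor's addition, not part of the IEM sources): the
 * page clamp performed by iemOpcodeFetchMoreBytes above only reads up to the
 * next guest page boundary in one go; an instruction crossing into the next
 * page is completed by a further call once that page has been translated.
 * Excluded from the build; the example address is arbitrary.
 */
#if 0
static uint32_t iemExampleBytesLeftOnPage(RTGCPTR GCPtr)
{
    /* E.g. GCPtr = 0x00007ffffffffffa leaves 6 bytes before the 4 KiB boundary. */
    return GUEST_PAGE_SIZE - (uint32_t)(GCPtr & GUEST_PAGE_OFFSET_MASK);
}
#endif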
1569#ifndef IEM_WITH_SETJMP
1570
1571/**
1572 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1573 *
1574 * @returns Strict VBox status code.
1575 * @param pVCpu The cross context virtual CPU structure of the
1576 * calling thread.
1577 * @param pb Where to return the opcode byte.
1578 */
1579VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1580{
1581 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1582 if (rcStrict == VINF_SUCCESS)
1583 {
1584 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1585 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1586 pVCpu->iem.s.offOpcode = offOpcode + 1;
1587 }
1588 else
1589 *pb = 0;
1590 return rcStrict;
1591}
1592
1593#else /* IEM_WITH_SETJMP */
1594
1595/**
1596 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1597 *
1598 * @returns The opcode byte.
1599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1600 */
1601uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1602{
1603# ifdef IEM_WITH_CODE_TLB
1604 uint8_t u8;
1605 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1606 return u8;
1607# else
1608 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1609 if (rcStrict == VINF_SUCCESS)
1610 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1611 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1612# endif
1613}
1614
1615#endif /* IEM_WITH_SETJMP */
1616
1617#ifndef IEM_WITH_SETJMP
1618
1619/**
1620 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1621 *
1622 * @returns Strict VBox status code.
1623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1624 * @param pu16 Where to return the opcode byte, sign-extended to a word.
1625 */
1626VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1627{
1628 uint8_t u8;
1629 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1630 if (rcStrict == VINF_SUCCESS)
1631 *pu16 = (int8_t)u8;
1632 return rcStrict;
1633}
1634
1635
1636/**
1637 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1638 *
1639 * @returns Strict VBox status code.
1640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1641 * @param pu32 Where to return the opcode dword.
1642 */
1643VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1644{
1645 uint8_t u8;
1646 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1647 if (rcStrict == VINF_SUCCESS)
1648 *pu32 = (int8_t)u8;
1649 return rcStrict;
1650}
1651
1652
1653/**
1654 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1655 *
1656 * @returns Strict VBox status code.
1657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1658 * @param pu64 Where to return the opcode qword.
1659 */
1660VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1661{
1662 uint8_t u8;
1663 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1664 if (rcStrict == VINF_SUCCESS)
1665 *pu64 = (int8_t)u8;
1666 return rcStrict;
1667}
1668
1669#endif /* !IEM_WITH_SETJMP */
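
/*
 * Illustrative sketch (editor's addition, not part of the IEM sources): the
 * S8Sx getters above rely on the (int8_t) cast to sign-extend the opcode byte
 * into the wider destination, while the plain getters zero-extend.  Excluded
 * from the build; the opcode byte value is arbitrary.
 */
#if 0
static void iemExampleSignVsZeroExtension(void)
{
    uint8_t const  bOpcode = 0x80;                        /* -128 as a signed byte. */
    uint16_t const uSx16   = (uint16_t)(int8_t)bOpcode;   /* 0xff80 - sign extended. */
    uint32_t const uSx32   = (uint32_t)(int8_t)bOpcode;   /* 0xffffff80 */
    uint16_t const uZx16   = bOpcode;                     /* 0x0080 - zero extended. */
    Assert(uSx16 == UINT16_C(0xff80) && uSx32 == UINT32_C(0xffffff80) && uZx16 == UINT16_C(0x0080));
}
#endif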
1670
1671
1672#ifndef IEM_WITH_SETJMP
1673
1674/**
1675 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1676 *
1677 * @returns Strict VBox status code.
1678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1679 * @param pu16 Where to return the opcode word.
1680 */
1681VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1682{
1683 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1684 if (rcStrict == VINF_SUCCESS)
1685 {
1686 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1687# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1688 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1689# else
1690 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1691# endif
1692 pVCpu->iem.s.offOpcode = offOpcode + 2;
1693 }
1694 else
1695 *pu16 = 0;
1696 return rcStrict;
1697}
1698
1699#else /* IEM_WITH_SETJMP */
1700
1701/**
1702 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1703 *
1704 * @returns The opcode word.
1705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1706 */
1707uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1708{
1709# ifdef IEM_WITH_CODE_TLB
1710 uint16_t u16;
1711 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1712 return u16;
1713# else
1714 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1715 if (rcStrict == VINF_SUCCESS)
1716 {
1717 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1718 pVCpu->iem.s.offOpcode += 2;
1719# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1720 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1721# else
1722 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1723# endif
1724 }
1725 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1726# endif
1727}
1728
1729#endif /* IEM_WITH_SETJMP */
1730
1731#ifndef IEM_WITH_SETJMP
1732
1733/**
1734 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1735 *
1736 * @returns Strict VBox status code.
1737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1738 * @param pu32 Where to return the opcode double word.
1739 */
1740VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1741{
1742 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1743 if (rcStrict == VINF_SUCCESS)
1744 {
1745 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1746 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1747 pVCpu->iem.s.offOpcode = offOpcode + 2;
1748 }
1749 else
1750 *pu32 = 0;
1751 return rcStrict;
1752}
1753
1754
1755/**
1756 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1757 *
1758 * @returns Strict VBox status code.
1759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1760 * @param pu64 Where to return the opcode quad word.
1761 */
1762VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1763{
1764 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1765 if (rcStrict == VINF_SUCCESS)
1766 {
1767 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1768 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1769 pVCpu->iem.s.offOpcode = offOpcode + 2;
1770 }
1771 else
1772 *pu64 = 0;
1773 return rcStrict;
1774}
1775
1776#endif /* !IEM_WITH_SETJMP */
1777
1778#ifndef IEM_WITH_SETJMP
1779
1780/**
1781 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1782 *
1783 * @returns Strict VBox status code.
1784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1785 * @param pu32 Where to return the opcode dword.
1786 */
1787VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1788{
1789 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1790 if (rcStrict == VINF_SUCCESS)
1791 {
1792 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1793# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1794 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1795# else
1796 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1797 pVCpu->iem.s.abOpcode[offOpcode + 1],
1798 pVCpu->iem.s.abOpcode[offOpcode + 2],
1799 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1800# endif
1801 pVCpu->iem.s.offOpcode = offOpcode + 4;
1802 }
1803 else
1804 *pu32 = 0;
1805 return rcStrict;
1806}
1807
1808#else /* IEM_WITH_SETJMP */
1809
1810/**
1811 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1812 *
1813 * @returns The opcode dword.
1814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1815 */
1816uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1817{
1818# ifdef IEM_WITH_CODE_TLB
1819 uint32_t u32;
1820 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1821 return u32;
1822# else
1823 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1824 if (rcStrict == VINF_SUCCESS)
1825 {
1826 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1827 pVCpu->iem.s.offOpcode = offOpcode + 4;
1828# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1829 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1830# else
1831 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1832 pVCpu->iem.s.abOpcode[offOpcode + 1],
1833 pVCpu->iem.s.abOpcode[offOpcode + 2],
1834 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1835# endif
1836 }
1837 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1838# endif
1839}
1840
1841#endif /* IEM_WITH_SETJMP */
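
/*
 * Illustrative sketch (editor's addition, not part of the IEM sources): the
 * RT_MAKE_U32_FROM_U8 composition used above is equivalent to the unaligned
 * read in the IEM_USE_UNALIGNED_DATA_ACCESS branch on little-endian hosts;
 * the first argument ends up in the least significant byte.  Excluded from
 * the build; the byte values are arbitrary.
 */
#if 0
static void iemExampleLittleEndianImmediate(void)
{
    uint8_t const  abBytes[4] = { 0x78, 0x56, 0x34, 0x12 };  /* As they appear in the instruction stream. */
    uint32_t const uImm       = RT_MAKE_U32_FROM_U8(abBytes[0], abBytes[1], abBytes[2], abBytes[3]);
    Assert(uImm == UINT32_C(0x12345678));
}
#endif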
1842
1843#ifndef IEM_WITH_SETJMP
1844
1845/**
1846 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1847 *
1848 * @returns Strict VBox status code.
1849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1850 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1851 */
1852VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1853{
1854 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1855 if (rcStrict == VINF_SUCCESS)
1856 {
1857 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1858 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1859 pVCpu->iem.s.abOpcode[offOpcode + 1],
1860 pVCpu->iem.s.abOpcode[offOpcode + 2],
1861 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1862 pVCpu->iem.s.offOpcode = offOpcode + 4;
1863 }
1864 else
1865 *pu64 = 0;
1866 return rcStrict;
1867}
1868
1869
1870/**
1871 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1872 *
1873 * @returns Strict VBox status code.
1874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1875 * @param pu64 Where to return the opcode qword.
1876 */
1877VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1878{
1879 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1880 if (rcStrict == VINF_SUCCESS)
1881 {
1882 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1883 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1884 pVCpu->iem.s.abOpcode[offOpcode + 1],
1885 pVCpu->iem.s.abOpcode[offOpcode + 2],
1886 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1887 pVCpu->iem.s.offOpcode = offOpcode + 4;
1888 }
1889 else
1890 *pu64 = 0;
1891 return rcStrict;
1892}
1893
1894#endif /* !IEM_WITH_SETJMP */
1895
1896#ifndef IEM_WITH_SETJMP
1897
1898/**
1899 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1900 *
1901 * @returns Strict VBox status code.
1902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1903 * @param pu64 Where to return the opcode qword.
1904 */
1905VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1906{
1907 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1908 if (rcStrict == VINF_SUCCESS)
1909 {
1910 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1911# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1912 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1913# else
1914 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1915 pVCpu->iem.s.abOpcode[offOpcode + 1],
1916 pVCpu->iem.s.abOpcode[offOpcode + 2],
1917 pVCpu->iem.s.abOpcode[offOpcode + 3],
1918 pVCpu->iem.s.abOpcode[offOpcode + 4],
1919 pVCpu->iem.s.abOpcode[offOpcode + 5],
1920 pVCpu->iem.s.abOpcode[offOpcode + 6],
1921 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1922# endif
1923 pVCpu->iem.s.offOpcode = offOpcode + 8;
1924 }
1925 else
1926 *pu64 = 0;
1927 return rcStrict;
1928}
1929
1930#else /* IEM_WITH_SETJMP */
1931
1932/**
1933 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1934 *
1935 * @returns The opcode qword.
1936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1937 */
1938uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1939{
1940# ifdef IEM_WITH_CODE_TLB
1941 uint64_t u64;
1942 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1943 return u64;
1944# else
1945 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1946 if (rcStrict == VINF_SUCCESS)
1947 {
1948 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1949 pVCpu->iem.s.offOpcode = offOpcode + 8;
1950# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1951 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1952# else
1953 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1954 pVCpu->iem.s.abOpcode[offOpcode + 1],
1955 pVCpu->iem.s.abOpcode[offOpcode + 2],
1956 pVCpu->iem.s.abOpcode[offOpcode + 3],
1957 pVCpu->iem.s.abOpcode[offOpcode + 4],
1958 pVCpu->iem.s.abOpcode[offOpcode + 5],
1959 pVCpu->iem.s.abOpcode[offOpcode + 6],
1960 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1961# endif
1962 }
1963 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1964# endif
1965}
1966
1967#endif /* IEM_WITH_SETJMP */
1968
1969
1970
1971/** @name Misc Worker Functions.
1972 * @{
1973 */
1974
1975/**
1976 * Gets the exception class for the specified exception vector.
1977 *
1978 * @returns The class of the specified exception.
1979 * @param uVector The exception vector.
1980 */
1981static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1982{
1983 Assert(uVector <= X86_XCPT_LAST);
1984 switch (uVector)
1985 {
1986 case X86_XCPT_DE:
1987 case X86_XCPT_TS:
1988 case X86_XCPT_NP:
1989 case X86_XCPT_SS:
1990 case X86_XCPT_GP:
1991 case X86_XCPT_SX: /* AMD only */
1992 return IEMXCPTCLASS_CONTRIBUTORY;
1993
1994 case X86_XCPT_PF:
1995 case X86_XCPT_VE: /* Intel only */
1996 return IEMXCPTCLASS_PAGE_FAULT;
1997
1998 case X86_XCPT_DF:
1999 return IEMXCPTCLASS_DOUBLE_FAULT;
2000 }
2001 return IEMXCPTCLASS_BENIGN;
2002}
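
/*
 * Illustrative sketch (editor's addition, not part of the IEM sources): a few
 * data points for the classification above, handy when following the
 * recursion rules in IEMEvaluateRecursiveXcpt below.  Excluded from the build.
 */
#if 0
static void iemExampleXcptClasses(void)
{
    Assert(iemGetXcptClass(X86_XCPT_GP) == IEMXCPTCLASS_CONTRIBUTORY);  /* #GP contributes to #DF. */
    Assert(iemGetXcptClass(X86_XCPT_PF) == IEMXCPTCLASS_PAGE_FAULT);    /* #PF has a class of its own. */
    Assert(iemGetXcptClass(X86_XCPT_UD) == IEMXCPTCLASS_BENIGN);        /* #UD never escalates. */
}
#endif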
2003
2004
2005/**
2006 * Evaluates how to handle an exception caused during delivery of another event
2007 * (exception / interrupt).
2008 *
2009 * @returns How to handle the recursive exception.
2010 * @param pVCpu The cross context virtual CPU structure of the
2011 * calling thread.
2012 * @param fPrevFlags The flags of the previous event.
2013 * @param uPrevVector The vector of the previous event.
2014 * @param fCurFlags The flags of the current exception.
2015 * @param uCurVector The vector of the current exception.
2016 * @param pfXcptRaiseInfo Where to store additional information about the
2017 * exception condition. Optional.
2018 */
2019VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
2020 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
2021{
2022 /*
2023     * Only CPU exceptions can be raised while delivering other events; software interrupt
2024     * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
2025 */
2026 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
2027 Assert(pVCpu); RT_NOREF(pVCpu);
2028 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
2029
2030 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
2031 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
2032 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2033 {
2034 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
2035 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
2036 {
2037 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
2038 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
2039 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
2040 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
2041 {
2042 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2043 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
2044 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
2045 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
2046 uCurVector, pVCpu->cpum.GstCtx.cr2));
2047 }
2048 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2049 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
2050 {
2051 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2052 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
2053 }
2054 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
2055 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2056 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
2057 {
2058 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
2059 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
2060 }
2061 }
2062 else
2063 {
2064 if (uPrevVector == X86_XCPT_NMI)
2065 {
2066 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
2067 if (uCurVector == X86_XCPT_PF)
2068 {
2069 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
2070 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
2071 }
2072 }
2073 else if ( uPrevVector == X86_XCPT_AC
2074 && uCurVector == X86_XCPT_AC)
2075 {
2076 enmRaise = IEMXCPTRAISE_CPU_HANG;
2077 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
2078 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
2079 }
2080 }
2081 }
2082 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
2083 {
2084 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
2085 if (uCurVector == X86_XCPT_PF)
2086 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
2087 }
2088 else
2089 {
2090 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
2091 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
2092 }
2093
2094 if (pfXcptRaiseInfo)
2095 *pfXcptRaiseInfo = fRaiseInfo;
2096 return enmRaise;
2097}
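
/*
 * Illustrative sketch (editor's addition, not part of the IEM sources): the
 * classic escalation chain implemented above - a contributory fault raised
 * while delivering a page fault becomes a double fault, and a contributory
 * fault raised while delivering the double fault becomes a triple fault.
 * Excluded from the build; assumes a valid pVCpu.
 */
#if 0
static void iemExampleRecursiveXcptRules(PVMCPUCC pVCpu)
{
    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    Assert(   IEMEvaluateRecursiveXcpt(pVCpu, IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, &fRaiseInfo)
           == IEMXCPTRAISE_DOUBLE_FAULT);
    Assert(   IEMEvaluateRecursiveXcpt(pVCpu, IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_DF,
                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, &fRaiseInfo)
           == IEMXCPTRAISE_TRIPLE_FAULT);
    RT_NOREF(fRaiseInfo);
}
#endif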
2098
2099
2100/**
2101 * Enters the CPU shutdown state initiated by a triple fault or other
2102 * unrecoverable conditions.
2103 *
2104 * @returns Strict VBox status code.
2105 * @param pVCpu The cross context virtual CPU structure of the
2106 * calling thread.
2107 */
2108static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
2109{
2110 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2111 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2112
2113 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
2114 {
2115 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
2116 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2117 }
2118
2119 RT_NOREF(pVCpu);
2120 return VINF_EM_TRIPLE_FAULT;
2121}
2122
2123
2124/**
2125 * Validates a new SS segment.
2126 *
2127 * @returns VBox strict status code.
2128 * @param pVCpu The cross context virtual CPU structure of the
2129 * calling thread.
2130 * @param NewSS The new SS selector.
2131 * @param uCpl The CPL to load the stack for.
2132 * @param pDesc Where to return the descriptor.
2133 */
2134static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
2135{
2136 /* Null selectors are not allowed (we're not called for dispatching
2137 interrupts with SS=0 in long mode). */
2138 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2139 {
2140 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2141 return iemRaiseTaskSwitchFault0(pVCpu);
2142 }
2143
2144 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2145 if ((NewSS & X86_SEL_RPL) != uCpl)
2146 {
2147 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2148 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2149 }
2150
2151 /*
2152 * Read the descriptor.
2153 */
2154 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2155 if (rcStrict != VINF_SUCCESS)
2156 return rcStrict;
2157
2158 /*
2159 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2160 */
2161 if (!pDesc->Legacy.Gen.u1DescType)
2162 {
2163 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2164 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2165 }
2166
2167 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2168 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2169 {
2170 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2171 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2172 }
2173 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2174 {
2175 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2176 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2177 }
2178
2179 /* Is it there? */
2180 /** @todo testcase: Is this checked before the canonical / limit check below? */
2181 if (!pDesc->Legacy.Gen.u1Present)
2182 {
2183 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2184 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2185 }
2186
2187 return VINF_SUCCESS;
2188}
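
/*
 * Illustrative sketch (editor's addition, not part of the IEM sources): the
 * checks performed above, condensed into a single predicate over an already
 * fetched descriptor (same order, minus the fault raising).  Excluded from
 * the build; the helper name is made up for this example.
 */
#if 0
static bool iemExampleIsValidNewSS(RTSEL Sel, uint8_t uCpl, IEMSELDESC const *pDesc)
{
    return (Sel & X86_SEL_MASK_OFF_RPL)                      /* not a null selector */
        && (Sel & X86_SEL_RPL) == uCpl                       /* RPL matches CPL */
        && pDesc->Legacy.Gen.u1DescType                      /* code/data, not system */
        && !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)   /* a data segment... */
        && (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE)   /* ...that is writable */
        && pDesc->Legacy.Gen.u2Dpl == uCpl                   /* DPL matches CPL */
        && pDesc->Legacy.Gen.u1Present;                      /* and it is present */
}
#endif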
2189
2190/** @} */
2191
2192
2193/** @name Raising Exceptions.
2194 *
2195 * @{
2196 */
2197
2198
2199/**
2200 * Loads the specified stack far pointer from the TSS.
2201 *
2202 * @returns VBox strict status code.
2203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2204 * @param uCpl The CPL to load the stack for.
2205 * @param pSelSS Where to return the new stack segment.
2206 * @param puEsp Where to return the new stack pointer.
2207 */
2208static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2209{
2210 VBOXSTRICTRC rcStrict;
2211 Assert(uCpl < 4);
2212
2213 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2214 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2215 {
2216 /*
2217 * 16-bit TSS (X86TSS16).
2218 */
2219 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2220 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2221 {
2222 uint32_t off = uCpl * 4 + 2;
2223 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2224 {
2225 /** @todo check actual access pattern here. */
2226 uint32_t u32Tmp = 0; /* gcc maybe... */
2227 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2228 if (rcStrict == VINF_SUCCESS)
2229 {
2230 *puEsp = RT_LOWORD(u32Tmp);
2231 *pSelSS = RT_HIWORD(u32Tmp);
2232 return VINF_SUCCESS;
2233 }
2234 }
2235 else
2236 {
2237 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2238 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2239 }
2240 break;
2241 }
2242
2243 /*
2244 * 32-bit TSS (X86TSS32).
2245 */
2246 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2247 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2248 {
2249 uint32_t off = uCpl * 8 + 4;
2250 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2251 {
2252/** @todo check actual access pattern here. */
2253 uint64_t u64Tmp;
2254 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2255 if (rcStrict == VINF_SUCCESS)
2256 {
2257 *puEsp = u64Tmp & UINT32_MAX;
2258 *pSelSS = (RTSEL)(u64Tmp >> 32);
2259 return VINF_SUCCESS;
2260 }
2261 }
2262 else
2263 {
2264                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2265 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2266 }
2267 break;
2268 }
2269
2270 default:
2271 AssertFailed();
2272 rcStrict = VERR_IEM_IPE_4;
2273 break;
2274 }
2275
2276 *puEsp = 0; /* make gcc happy */
2277 *pSelSS = 0; /* make gcc happy */
2278 return rcStrict;
2279}
2280
2281
2282/**
2283 * Loads the specified stack pointer from the 64-bit TSS.
2284 *
2285 * @returns VBox strict status code.
2286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2287 * @param uCpl The CPL to load the stack for.
2288 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2289 * @param puRsp Where to return the new stack pointer.
2290 */
2291static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2292{
2293 Assert(uCpl < 4);
2294 Assert(uIst < 8);
2295 *puRsp = 0; /* make gcc happy */
2296
2297 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2298 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2299
2300 uint32_t off;
2301 if (uIst)
2302 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2303 else
2304 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2305 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2306 {
2307 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2308 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2309 }
2310
2311 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2312}
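
/*
 * Illustrative sketch (editor's addition, not part of the IEM sources): how
 * the two helpers above locate the privilege level stack slots in the three
 * TSS layouts.  The offsets follow directly from the X86TSS16, X86TSS32 and
 * X86TSS64 structures.  Excluded from the build; the helper names are made up.
 */
#if 0
static uint32_t iemExampleLegacyTssStackSlotOffset(bool fTss386, uint8_t uCpl)
{
    /* 16-bit TSS: SP0/SS0 word pairs from offset 2;  32-bit TSS: ESP0/SS0 dword pairs from offset 4. */
    return fTss386 ? uCpl * 8 + 4 : uCpl * 4 + 2;
}

static uint32_t iemExampleTss64StackSlotOffset(uint8_t uCpl, uint8_t uIst)
{
    /* 64-bit TSS: IST1..IST7 when an IST index is given, otherwise RSP0..RSP2 by CPL. */
    return (uint32_t)(uIst ? (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1)
                           :  uCpl      * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0));
}
#endif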
2313
2314
2315/**
2316 * Adjust the CPU state according to the exception being raised.
2317 *
2318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2319 * @param u8Vector The exception that has been raised.
2320 */
2321DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2322{
2323 switch (u8Vector)
2324 {
2325 case X86_XCPT_DB:
2326 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2327 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2328 break;
2329 /** @todo Read the AMD and Intel exception reference... */
2330 }
2331}
2332
2333
2334/**
2335 * Implements exceptions and interrupts for real mode.
2336 *
2337 * @returns VBox strict status code.
2338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2339 * @param cbInstr The number of bytes to offset rIP by in the return
2340 * address.
2341 * @param u8Vector The interrupt / exception vector number.
2342 * @param fFlags The flags.
2343 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2344 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2345 */
2346static VBOXSTRICTRC
2347iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2348 uint8_t cbInstr,
2349 uint8_t u8Vector,
2350 uint32_t fFlags,
2351 uint16_t uErr,
2352 uint64_t uCr2) RT_NOEXCEPT
2353{
2354 NOREF(uErr); NOREF(uCr2);
2355 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2356
2357 /*
2358 * Read the IDT entry.
2359 */
2360 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2361 {
2362 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2363 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2364 }
2365 RTFAR16 Idte;
2366 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2367 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2368 {
2369 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2370 return rcStrict;
2371 }
2372
2373#ifdef LOG_ENABLED
2374 /* If software interrupt, try decode it if logging is enabled and such. */
2375 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2376 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2377 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2378#endif
2379
2380 /*
2381 * Push the stack frame.
2382 */
2383 uint8_t bUnmapInfo;
2384 uint16_t *pu16Frame;
2385 uint64_t uNewRsp;
2386 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2387 if (rcStrict != VINF_SUCCESS)
2388 return rcStrict;
2389
2390 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2391#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2392 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2393 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2394 fEfl |= UINT16_C(0xf000);
2395#endif
2396 pu16Frame[2] = (uint16_t)fEfl;
2397 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2398 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2399 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2400 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2401 return rcStrict;
2402
2403 /*
2404 * Load the vector address into cs:ip and make exception specific state
2405 * adjustments.
2406 */
2407 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2408 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2409 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2410 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2411 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2412 pVCpu->cpum.GstCtx.rip = Idte.off;
2413 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2414 IEMMISC_SET_EFL(pVCpu, fEfl);
2415
2416 /** @todo do we actually do this in real mode? */
2417 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2418 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2419
2420 /*
2421     * Deal with debug events that follow the exception and clear inhibit flags.
2422 */
2423 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2424 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2425 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2426 else
2427 {
2428 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2429 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2430 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2431 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2432 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2433 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2434 return iemRaiseDebugException(pVCpu);
2435 }
2436
2437    /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2438 so best leave them alone in case we're in a weird kind of real mode... */
2439
2440 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2441}
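
/*
 * Illustrative sketch (editor's addition, not part of the IEM sources): in
 * real mode the "IDT" is the classic interrupt vector table of IP:CS far
 * pointers, 4 bytes per vector, which is why the code above fetches a single
 * dword at IDTR.base + 4 * vector and pushes FLAGS, CS and IP in that order.
 * Excluded from the build; the vector in the comment is just an example.
 */
#if 0
static RTGCPTR iemExampleRealModeIvtEntryAddr(PVMCPUCC pVCpu, uint8_t u8Vector)
{
    /* With the usual IDTR.base of 0, INT 21h reads its IP:CS pair at linear address 0x84. */
    return pVCpu->cpum.GstCtx.idtr.pIdt + (uint32_t)u8Vector * 4;
}
#endif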
2442
2443
2444/**
2445 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2446 *
2447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2448 * @param pSReg Pointer to the segment register.
2449 */
2450DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2451{
2452 pSReg->Sel = 0;
2453 pSReg->ValidSel = 0;
2454 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2455 {
2456 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2457 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2458 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2459 }
2460 else
2461 {
2462 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2463 /** @todo check this on AMD-V */
2464 pSReg->u64Base = 0;
2465 pSReg->u32Limit = 0;
2466 }
2467}
2468
2469
2470/**
2471 * Loads a segment selector during a task switch in V8086 mode.
2472 *
2473 * @param pSReg Pointer to the segment register.
2474 * @param uSel The selector value to load.
2475 */
2476DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2477{
2478 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2479 pSReg->Sel = uSel;
2480 pSReg->ValidSel = uSel;
2481 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2482 pSReg->u64Base = uSel << 4;
2483 pSReg->u32Limit = 0xffff;
2484 pSReg->Attr.u = 0xf3;
2485}
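
/*
 * Illustrative sketch (editor's addition, not part of the IEM sources): in
 * virtual-8086 mode segmentation works like real mode, so the hidden base
 * loaded above is simply the selector shifted left by four.  Excluded from
 * the build; the selector/offset values are arbitrary.
 */
#if 0
static uint32_t iemExampleV86LinearAddr(uint16_t uSel, uint16_t offSeg)
{
    /* E.g. 0xb800:0x0010 -> 0xb8010, the classic VGA text buffer. */
    return ((uint32_t)uSel << 4) + offSeg;
}
#endif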
2486
2487
2488/**
2489 * Loads a segment selector during a task switch in protected mode.
2490 *
2491 * In this task switch scenario, we would throw \#TS exceptions rather than
2492 * \#GPs.
2493 *
2494 * @returns VBox strict status code.
2495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2496 * @param pSReg Pointer to the segment register.
2497 * @param uSel The new selector value.
2498 *
2499 * @remarks This does _not_ handle CS or SS.
2500 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2501 */
2502static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2503{
2504 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2505
2506 /* Null data selector. */
2507 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2508 {
2509 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2510 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2511 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2512 return VINF_SUCCESS;
2513 }
2514
2515 /* Fetch the descriptor. */
2516 IEMSELDESC Desc;
2517 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2518 if (rcStrict != VINF_SUCCESS)
2519 {
2520 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2521 VBOXSTRICTRC_VAL(rcStrict)));
2522 return rcStrict;
2523 }
2524
2525 /* Must be a data segment or readable code segment. */
2526 if ( !Desc.Legacy.Gen.u1DescType
2527 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2528 {
2529 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2530 Desc.Legacy.Gen.u4Type));
2531 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2532 }
2533
2534 /* Check privileges for data segments and non-conforming code segments. */
2535 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2536 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2537 {
2538 /* The RPL and the new CPL must be less than or equal to the DPL. */
2539 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2540 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2541 {
2542 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2543 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2544 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2545 }
2546 }
2547
2548 /* Is it there? */
2549 if (!Desc.Legacy.Gen.u1Present)
2550 {
2551 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2552 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2553 }
2554
2555 /* The base and limit. */
2556 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2557 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2558
2559 /*
2560 * Ok, everything checked out fine. Now set the accessed bit before
2561 * committing the result into the registers.
2562 */
2563 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2564 {
2565 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2566 if (rcStrict != VINF_SUCCESS)
2567 return rcStrict;
2568 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2569 }
2570
2571 /* Commit */
2572 pSReg->Sel = uSel;
2573 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2574 pSReg->u32Limit = cbLimit;
2575 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2576 pSReg->ValidSel = uSel;
2577 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2578 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2579 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2580
2581 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2582 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2583 return VINF_SUCCESS;
2584}
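
/*
 * Illustrative sketch (editor's addition, not part of the IEM sources): how
 * the hidden base and limit committed above are decoded from a legacy
 * descriptor - the 20-bit limit is expanded to byte granularity when the
 * G bit is set.  Excluded from the build.
 */
#if 0
static void iemExampleDescriptorDecode(PCX86DESC pDesc)
{
    uint64_t const u64Base = X86DESC_BASE(pDesc);    /* Base assembled from the three base fields. */
    uint32_t const cbLimit = X86DESC_LIMIT_G(pDesc); /* Limit, shifted up by 12 and filled with 0xfff when G=1. */
    RT_NOREF(u64Base, cbLimit);
}
#endif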
2585
2586
2587/**
2588 * Performs a task switch.
2589 *
2590 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2591 * caller is responsible for performing the necessary checks (like DPL, TSS
2592 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2593 * reference for JMP, CALL, IRET.
2594 *
2595 * If the task switch is due to a software interrupt or hardware exception,
2596 * the caller is responsible for validating the TSS selector and descriptor. See
2597 * Intel Instruction reference for INT n.
2598 *
2599 * @returns VBox strict status code.
2600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2601 * @param enmTaskSwitch The cause of the task switch.
2602 * @param uNextEip The EIP effective after the task switch.
2603 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2604 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2605 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2606 * @param SelTss The TSS selector of the new task.
2607 * @param pNewDescTss Pointer to the new TSS descriptor.
2608 */
2609VBOXSTRICTRC
2610iemTaskSwitch(PVMCPUCC pVCpu,
2611 IEMTASKSWITCH enmTaskSwitch,
2612 uint32_t uNextEip,
2613 uint32_t fFlags,
2614 uint16_t uErr,
2615 uint64_t uCr2,
2616 RTSEL SelTss,
2617 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2618{
2619 Assert(!IEM_IS_REAL_MODE(pVCpu));
2620 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2621 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2622
2623 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2624 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2625 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2626 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2627 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2628
2629 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2630 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2631
2632 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2633 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2634
2635 /* Update CR2 in case it's a page-fault. */
2636 /** @todo This should probably be done much earlier in IEM/PGM. See
2637 * @bugref{5653#c49}. */
2638 if (fFlags & IEM_XCPT_FLAGS_CR2)
2639 pVCpu->cpum.GstCtx.cr2 = uCr2;
2640
2641 /*
2642 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2643 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2644 */
2645 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2646 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2647 if (uNewTssLimit < uNewTssLimitMin)
2648 {
2649 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2650 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2651 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2652 }
2653
2654 /*
2655     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2656 * The new TSS must have been read and validated (DPL, limits etc.) before a
2657 * task-switch VM-exit commences.
2658 *
2659 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2660 */
2661 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2662 {
2663 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2664 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2665 }
2666
2667 /*
2668 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2669 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2670 */
2671 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2672 {
2673 uint64_t const uExitInfo1 = SelTss;
2674 uint64_t uExitInfo2 = uErr;
2675 switch (enmTaskSwitch)
2676 {
2677 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2678 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2679 default: break;
2680 }
2681 if (fFlags & IEM_XCPT_FLAGS_ERR)
2682 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2683 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2684 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2685
2686 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2687 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2688 RT_NOREF2(uExitInfo1, uExitInfo2);
2689 }
2690
2691 /*
2692     * Check the current TSS limit. The last bytes written to the current TSS during the
2693     * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2694 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2695 *
2696     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2697 * end up with smaller than "legal" TSS limits.
2698 */
2699 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2700 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2701 if (uCurTssLimit < uCurTssLimitMin)
2702 {
2703 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2704 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2705 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2706 }
2707
2708 /*
2709 * Verify that the new TSS can be accessed and map it. Map only the required contents
2710 * and not the entire TSS.
2711 */
2712 uint8_t bUnmapInfoNewTss;
2713 void *pvNewTss;
2714 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2715 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2716 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2717 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2718 * not perform correct translation if this happens. See Intel spec. 7.2.1
2719 * "Task-State Segment". */
2720 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2721/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2722 * Consider wrapping the remainder into a function for simpler cleanup. */
2723 if (rcStrict != VINF_SUCCESS)
2724 {
2725 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2726 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2727 return rcStrict;
2728 }
2729
2730 /*
2731 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2732 */
2733 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2734 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2735 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2736 {
2737 uint8_t bUnmapInfoDescCurTss;
2738 PX86DESC pDescCurTss;
2739 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2740 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2741 if (rcStrict != VINF_SUCCESS)
2742 {
2743             Log(("iemTaskSwitch: Failed to read the current TSS descriptor in the GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2744 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2745 return rcStrict;
2746 }
2747
2748 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2749 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2750 if (rcStrict != VINF_SUCCESS)
2751 {
2752             Log(("iemTaskSwitch: Failed to commit the current TSS descriptor in the GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2753 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2754 return rcStrict;
2755 }
2756
2757 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2758 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2759 {
2760 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2761 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2762 fEFlags &= ~X86_EFL_NT;
2763 }
2764 }
2765
2766 /*
2767 * Save the CPU state into the current TSS.
2768 */
2769 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2770 if (GCPtrNewTss == GCPtrCurTss)
2771 {
2772 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2773 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2774 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2775 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2776 pVCpu->cpum.GstCtx.ldtr.Sel));
2777 }
2778 if (fIsNewTss386)
2779 {
2780 /*
2781 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2782 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2783 */
2784 uint8_t bUnmapInfoCurTss32;
2785 void *pvCurTss32;
2786 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2787 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2788 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2789 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2790 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2791 if (rcStrict != VINF_SUCCESS)
2792 {
2793 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2794 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2795 return rcStrict;
2796 }
2797
2798 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */
2799 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2800 pCurTss32->eip = uNextEip;
2801 pCurTss32->eflags = fEFlags;
2802 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2803 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2804 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2805 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2806 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2807 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2808 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2809 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2810 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2811 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2812 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2813 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2814 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2815 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2816
2817 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2818 if (rcStrict != VINF_SUCCESS)
2819 {
2820 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2821 VBOXSTRICTRC_VAL(rcStrict)));
2822 return rcStrict;
2823 }
2824 }
2825 else
2826 {
2827 /*
2828 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2829 */
2830 uint8_t bUnmapInfoCurTss16;
2831 void *pvCurTss16;
2832 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2833 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2834 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2835 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2836 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2837 if (rcStrict != VINF_SUCCESS)
2838 {
2839 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2840 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2841 return rcStrict;
2842 }
2843
2844 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */
2845 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2846 pCurTss16->ip = uNextEip;
2847 pCurTss16->flags = (uint16_t)fEFlags;
2848 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2849 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2850 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2851 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2852 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2853 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2854 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2855 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2856 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2857 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2858 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2859 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2860
2861 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2862 if (rcStrict != VINF_SUCCESS)
2863 {
2864 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2865 VBOXSTRICTRC_VAL(rcStrict)));
2866 return rcStrict;
2867 }
2868 }
2869
2870 /*
2871 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2872 */
2873 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2874 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2875 {
2876 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2877 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2878 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2879 }
2880
2881 /*
2882 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2883 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2884 */
2885 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2886 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2887 bool fNewDebugTrap;
2888 if (fIsNewTss386)
2889 {
2890 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2891 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2892 uNewEip = pNewTss32->eip;
2893 uNewEflags = pNewTss32->eflags;
2894 uNewEax = pNewTss32->eax;
2895 uNewEcx = pNewTss32->ecx;
2896 uNewEdx = pNewTss32->edx;
2897 uNewEbx = pNewTss32->ebx;
2898 uNewEsp = pNewTss32->esp;
2899 uNewEbp = pNewTss32->ebp;
2900 uNewEsi = pNewTss32->esi;
2901 uNewEdi = pNewTss32->edi;
2902 uNewES = pNewTss32->es;
2903 uNewCS = pNewTss32->cs;
2904 uNewSS = pNewTss32->ss;
2905 uNewDS = pNewTss32->ds;
2906 uNewFS = pNewTss32->fs;
2907 uNewGS = pNewTss32->gs;
2908 uNewLdt = pNewTss32->selLdt;
2909 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2910 }
2911 else
2912 {
2913 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2914 uNewCr3 = 0;
2915 uNewEip = pNewTss16->ip;
2916 uNewEflags = pNewTss16->flags;
2917 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2918 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2919 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2920 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2921 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2922 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2923 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2924 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2925 uNewES = pNewTss16->es;
2926 uNewCS = pNewTss16->cs;
2927 uNewSS = pNewTss16->ss;
2928 uNewDS = pNewTss16->ds;
2929 uNewFS = 0;
2930 uNewGS = 0;
2931 uNewLdt = pNewTss16->selLdt;
2932 fNewDebugTrap = false;
2933 }
2934
2935 if (GCPtrNewTss == GCPtrCurTss)
2936 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2937 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2938
2939 /*
2940 * We're done accessing the new TSS.
2941 */
2942 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2943 if (rcStrict != VINF_SUCCESS)
2944 {
2945 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2946 return rcStrict;
2947 }
2948
2949 /*
2950 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2951 */
2952 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2953 {
2954 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2955 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2956 if (rcStrict != VINF_SUCCESS)
2957 {
2958 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2959 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2960 return rcStrict;
2961 }
2962
2963 /* Check that the descriptor indicates the new TSS is available (not busy). */
2964 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2965 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2966 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2967
2968 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2969 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2970 if (rcStrict != VINF_SUCCESS)
2971 {
2972 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2973 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2974 return rcStrict;
2975 }
2976 }
2977
2978 /*
2979     * From this point on, we're technically in the new task. Exceptions are deferred
2980     * until the task switch has completed, but are raised before any instruction is executed in the new task.
2981 */
2982 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2983 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2984 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2985 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2986 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2987 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2988 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2989
2990 /* Set the busy bit in TR. */
2991 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2992
2993 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2994 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2995 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2996 {
2997 uNewEflags |= X86_EFL_NT;
2998 }
2999
3000 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3001 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
3002 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3003
3004 pVCpu->cpum.GstCtx.eip = uNewEip;
3005 pVCpu->cpum.GstCtx.eax = uNewEax;
3006 pVCpu->cpum.GstCtx.ecx = uNewEcx;
3007 pVCpu->cpum.GstCtx.edx = uNewEdx;
3008 pVCpu->cpum.GstCtx.ebx = uNewEbx;
3009 pVCpu->cpum.GstCtx.esp = uNewEsp;
3010 pVCpu->cpum.GstCtx.ebp = uNewEbp;
3011 pVCpu->cpum.GstCtx.esi = uNewEsi;
3012 pVCpu->cpum.GstCtx.edi = uNewEdi;
3013
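/* Only the architecturally defined EFLAGS bits are taken from the TSS image; bit 1 is
   reserved and always reads as 1, hence the RA1 mask. */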
3014 uNewEflags &= X86_EFL_LIVE_MASK;
3015 uNewEflags |= X86_EFL_RA1_MASK;
3016 IEMMISC_SET_EFL(pVCpu, uNewEflags);
3017
3018 /*
3019 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3020 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3021 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
3022 */
3023 pVCpu->cpum.GstCtx.es.Sel = uNewES;
3024 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
3025
3026 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3027 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
3028
3029 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3030 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
3031
3032 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
3033 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
3034
3035 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
3036 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
3037
3038 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
3039 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
3040 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3041
3042 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
3043 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3044 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
3045 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3046
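/* Presumably matching observable Intel behavior (e.g. via the VMX segment attributes): the
   not-yet-revalidated segments are flagged as unusable on Intel guests only. */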
3047 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3048 {
3049 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
3050 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
3051 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
3052 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
3053 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
3054 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
3055 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3056 }
3057
3058 /*
3059 * Switch CR3 for the new task.
3060 */
3061 if ( fIsNewTss386
3062 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
3063 {
3064 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3065 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3066 AssertRCSuccessReturn(rc, rc);
3067
3068 /* Inform PGM. */
3069 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
3070 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
3071 AssertRCReturn(rc, rc);
3072 /* ignore informational status codes */
3073
3074 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3075 }
3076
3077 /*
3078 * Switch LDTR for the new task.
3079 */
3080 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3081 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
3082 else
3083 {
3084 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3085
3086 IEMSELDESC DescNewLdt;
3087 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3088 if (rcStrict != VINF_SUCCESS)
3089 {
3090 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3091 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3092 return rcStrict;
3093 }
3094 if ( !DescNewLdt.Legacy.Gen.u1Present
3095 || DescNewLdt.Legacy.Gen.u1DescType
3096 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3097 {
3098 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3099 uNewLdt, DescNewLdt.Legacy.u));
3100 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3101 }
3102
3103 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
3104 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3105 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3106 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3107 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3108 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3109 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3110 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
3111 }
3112
3113 IEMSELDESC DescSS;
3114 if (IEM_IS_V86_MODE(pVCpu))
3115 {
3116 IEM_SET_CPL(pVCpu, 3);
3117 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
3118 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
3119 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
3120 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
3121 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
3122 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
3123
3124 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
3125 DescSS.Legacy.u = 0;
3126 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
3127 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
3128 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
3129 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
3130 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
3131 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3132 DescSS.Legacy.Gen.u2Dpl = 3;
3133 }
3134 else
3135 {
3136 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
3137
3138 /*
3139 * Load the stack segment for the new task.
3140 */
3141 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3142 {
3143 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3144 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3145 }
3146
3147 /* Fetch the descriptor. */
3148 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3149 if (rcStrict != VINF_SUCCESS)
3150 {
3151 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3152 VBOXSTRICTRC_VAL(rcStrict)));
3153 return rcStrict;
3154 }
3155
3156 /* SS must be a data segment and writable. */
3157 if ( !DescSS.Legacy.Gen.u1DescType
3158 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3159 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3160 {
3161 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3162 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3163 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3164 }
3165
3166 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3167 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3168 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3169 {
3170 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3171 uNewCpl));
3172 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3173 }
3174
3175 /* Is it there? */
3176 if (!DescSS.Legacy.Gen.u1Present)
3177 {
3178 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3179 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3180 }
3181
3182 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3183 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3184
3185 /* Set the accessed bit before committing the result into SS. */
3186 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3187 {
3188 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3189 if (rcStrict != VINF_SUCCESS)
3190 return rcStrict;
3191 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3192 }
3193
3194 /* Commit SS. */
3195 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3196 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3197 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3198 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3199 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3200 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3202
3203 /* CPL has changed, update IEM before loading rest of segments. */
3204 IEM_SET_CPL(pVCpu, uNewCpl);
3205
3206 /*
3207 * Load the data segments for the new task.
3208 */
3209 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3210 if (rcStrict != VINF_SUCCESS)
3211 return rcStrict;
3212 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3213 if (rcStrict != VINF_SUCCESS)
3214 return rcStrict;
3215 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3216 if (rcStrict != VINF_SUCCESS)
3217 return rcStrict;
3218 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3219 if (rcStrict != VINF_SUCCESS)
3220 return rcStrict;
3221
3222 /*
3223 * Load the code segment for the new task.
3224 */
3225 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3226 {
3227 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3228 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3229 }
3230
3231 /* Fetch the descriptor. */
3232 IEMSELDESC DescCS;
3233 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3234 if (rcStrict != VINF_SUCCESS)
3235 {
3236 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3237 return rcStrict;
3238 }
3239
3240 /* CS must be a code segment. */
3241 if ( !DescCS.Legacy.Gen.u1DescType
3242 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3243 {
3244 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3245 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3246 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3247 }
3248
3249 /* For conforming CS, DPL must be less than or equal to the RPL. */
3250 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3251 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3252 {
3253 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3254 DescCS.Legacy.Gen.u2Dpl));
3255 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3256 }
3257
3258 /* For non-conforming CS, DPL must match RPL. */
3259 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3260 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3261 {
3262 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3263 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3264 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3265 }
3266
3267 /* Is it there? */
3268 if (!DescCS.Legacy.Gen.u1Present)
3269 {
3270 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3271 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3272 }
3273
3274 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3275 u64Base = X86DESC_BASE(&DescCS.Legacy);
3276
3277 /* Set the accessed bit before committing the result into CS. */
3278 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3279 {
3280 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3281 if (rcStrict != VINF_SUCCESS)
3282 return rcStrict;
3283 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3284 }
3285
3286 /* Commit CS. */
3287 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3288 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3289 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3290 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3291 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3292 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3293 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3294 }
3295
3296 /* Make sure the CPU mode is correct. */
3297 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3298 if (fExecNew != pVCpu->iem.s.fExec)
3299 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3300 pVCpu->iem.s.fExec = fExecNew;
3301
3302 /** @todo Debug trap. */
3303 if (fIsNewTss386 && fNewDebugTrap)
3304 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3305
3306 /*
3307 * Construct the error code masks based on what caused this task switch.
3308 * See Intel Instruction reference for INT.
3309 */
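/* Bit 0 of the pushed error code is the EXT bit: it is set when the event was delivered on
   behalf of an external source (hardware interrupt or earlier exception) rather than a
   software INT n / INT3 / INTO. */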
3310 uint16_t uExt;
3311 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3312 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3313 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3314 uExt = 1;
3315 else
3316 uExt = 0;
3317
3318 /*
3319 * Push any error code on to the new stack.
3320 */
3321 if (fFlags & IEM_XCPT_FLAGS_ERR)
3322 {
3323 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3324 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3325 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3326
3327 /* Check that there is sufficient space on the stack. */
3328 /** @todo Factor out segment limit checking for normal/expand down segments
3329 * into a separate function. */
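/* For a normal (expand-up) segment the valid offsets are [0, limit]; for an expand-down
   segment they are (limit, 0xFFFF] or (limit, 0xFFFFFFFF] depending on the D/B bit, which
   is what the two branches below check before pushing cbStackFrame bytes. */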
3330 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3331 {
3332 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3333 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3334 {
3335 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3336 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3337 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3338 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3339 }
3340 }
3341 else
3342 {
3343 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3344 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3345 {
3346 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3347 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3348 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3349 }
3350 }
3351
3352
3353 if (fIsNewTss386)
3354 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3355 else
3356 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3357 if (rcStrict != VINF_SUCCESS)
3358 {
3359 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3360 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3361 return rcStrict;
3362 }
3363 }
3364
3365 /* Check the new EIP against the new CS limit. */
3366 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3367 {
3368 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3369 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3370 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3371 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3372 }
3373
3374 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3375 pVCpu->cpum.GstCtx.ss.Sel));
3376 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3377}
3378
3379
3380/**
3381 * Implements exceptions and interrupts for protected mode.
3382 *
3383 * @returns VBox strict status code.
3384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3385 * @param cbInstr The number of bytes to offset rIP by in the return
3386 * address.
3387 * @param u8Vector The interrupt / exception vector number.
3388 * @param fFlags The flags.
3389 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3390 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3391 */
3392static VBOXSTRICTRC
3393iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3394 uint8_t cbInstr,
3395 uint8_t u8Vector,
3396 uint32_t fFlags,
3397 uint16_t uErr,
3398 uint64_t uCr2) RT_NOEXCEPT
3399{
3400 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3401
3402 /*
3403 * Read the IDT entry.
3404 */
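/* Protected-mode IDT entries are 8 bytes each; the limit check ensures the whole descriptor
   for this vector lies within the IDT. */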
3405 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3406 {
3407 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3408 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3409 }
3410 X86DESC Idte;
3411 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3412 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3413 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3414 {
3415 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3416 return rcStrict;
3417 }
3418 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3419 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3420 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3421 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3422
3423 /*
3424 * Check the descriptor type, DPL and such.
3425 * ASSUMES this is done in the same order as described for call-gate calls.
3426 */
3427 if (Idte.Gate.u1DescType)
3428 {
3429 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3430 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3431 }
3432 bool fTaskGate = false;
3433 uint8_t f32BitGate = true;
3434 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
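/* Only task, interrupt and trap gates are valid in the IDT. Interrupt gates additionally
   clear IF on entry (added to fEflToClear below), while trap gates leave IF alone. Note
   that f32BitGate doubles as a shift count (0 or 1) when sizing the 16-/32-bit stack frames. */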
3435 switch (Idte.Gate.u4Type)
3436 {
3437 case X86_SEL_TYPE_SYS_UNDEFINED:
3438 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3439 case X86_SEL_TYPE_SYS_LDT:
3440 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3441 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3442 case X86_SEL_TYPE_SYS_UNDEFINED2:
3443 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3444 case X86_SEL_TYPE_SYS_UNDEFINED3:
3445 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3446 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3447 case X86_SEL_TYPE_SYS_UNDEFINED4:
3448 {
3449 /** @todo check what actually happens when the type is wrong...
3450 * esp. call gates. */
3451 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3452 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3453 }
3454
3455 case X86_SEL_TYPE_SYS_286_INT_GATE:
3456 f32BitGate = false;
3457 RT_FALL_THRU();
3458 case X86_SEL_TYPE_SYS_386_INT_GATE:
3459 fEflToClear |= X86_EFL_IF;
3460 break;
3461
3462 case X86_SEL_TYPE_SYS_TASK_GATE:
3463 fTaskGate = true;
3464#ifndef IEM_IMPLEMENTS_TASKSWITCH
3465 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3466#endif
3467 break;
3468
3469 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3470 f32BitGate = false;
3471 break;
3472 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3473 break;
3474
3475 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3476 }
3477
3478 /* Check DPL against CPL if applicable. */
3479 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3480 {
3481 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3482 {
3483 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3484 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3485 }
3486 }
3487
3488 /* Is it there? */
3489 if (!Idte.Gate.u1Present)
3490 {
3491 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3492 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3493 }
3494
3495 /* Is it a task-gate? */
3496 if (fTaskGate)
3497 {
3498 /*
3499 * Construct the error code masks based on what caused this task switch.
3500 * See Intel Instruction reference for INT.
3501 */
3502 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3503 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3504 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3505 RTSEL SelTss = Idte.Gate.u16Sel;
3506
3507 /*
3508 * Fetch the TSS descriptor in the GDT.
3509 */
3510 IEMSELDESC DescTSS;
3511 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3512 if (rcStrict != VINF_SUCCESS)
3513 {
3514 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3515 VBOXSTRICTRC_VAL(rcStrict)));
3516 return rcStrict;
3517 }
3518
3519 /* The TSS descriptor must be a system segment and be available (not busy). */
3520 if ( DescTSS.Legacy.Gen.u1DescType
3521 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3522 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3523 {
3524 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3525 u8Vector, SelTss, DescTSS.Legacy.au64));
3526 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3527 }
3528
3529 /* The TSS must be present. */
3530 if (!DescTSS.Legacy.Gen.u1Present)
3531 {
3532 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3533 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3534 }
3535
3536 /* Do the actual task switch. */
3537 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3538 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3539 fFlags, uErr, uCr2, SelTss, &DescTSS);
3540 }
3541
3542 /* A null CS is bad. */
3543 RTSEL NewCS = Idte.Gate.u16Sel;
3544 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3545 {
3546 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3547 return iemRaiseGeneralProtectionFault0(pVCpu);
3548 }
3549
3550 /* Fetch the descriptor for the new CS. */
3551 IEMSELDESC DescCS;
3552 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3553 if (rcStrict != VINF_SUCCESS)
3554 {
3555 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3556 return rcStrict;
3557 }
3558
3559 /* Must be a code segment. */
3560 if (!DescCS.Legacy.Gen.u1DescType)
3561 {
3562 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3563 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3564 }
3565 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3566 {
3567 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3568 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3569 }
3570
3571 /* Don't allow lowering the privilege level. */
3572 /** @todo Does the lowering of privileges apply to software interrupts
3573 * only? This has a bearing on the more-privileged or
3574 * same-privilege stack behavior further down. A testcase would
3575 * be nice. */
3576 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3577 {
3578 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3579 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3580 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3581 }
3582
3583 /* Make sure the selector is present. */
3584 if (!DescCS.Legacy.Gen.u1Present)
3585 {
3586 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3587 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3588 }
3589
3590#ifdef LOG_ENABLED
3591 /* If software interrupt, try decode it if logging is enabled and such. */
3592 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3593 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3594 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3595#endif
3596
3597 /* Check the new EIP against the new CS limit. */
3598 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3599 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3600 ? Idte.Gate.u16OffsetLow
3601 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3602 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3603 if (uNewEip > cbLimitCS)
3604 {
3605 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3606 u8Vector, uNewEip, cbLimitCS, NewCS));
3607 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3608 }
3609 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3610
3611 /* Calc the flag image to push. */
3612 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3613 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3614 fEfl &= ~X86_EFL_RF;
3615 else
3616 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3617
3618 /* From V8086 mode only go to CPL 0. */
3619 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3620 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3621 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3622 {
3623 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3624 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3625 }
3626
3627 /*
3628 * If the privilege level changes, we need to get a new stack from the TSS.
3629 * This in turn means validating the new SS and ESP...
3630 */
3631 if (uNewCpl != IEM_GET_CPL(pVCpu))
3632 {
3633 RTSEL NewSS;
3634 uint32_t uNewEsp;
3635 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3636 if (rcStrict != VINF_SUCCESS)
3637 return rcStrict;
3638
3639 IEMSELDESC DescSS;
3640 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3641 if (rcStrict != VINF_SUCCESS)
3642 return rcStrict;
3643 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3644 if (!DescSS.Legacy.Gen.u1DefBig)
3645 {
3646 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3647 uNewEsp = (uint16_t)uNewEsp;
3648 }
3649
3650 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3651
3652 /* Check that there is sufficient space for the stack frame. */
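/* Without V86: EIP, CS, EFLAGS, ESP, SS (+ optional error code) = 5 or 6 entries; from V86
   mode the ES, DS, FS and GS images are pushed as well, giving 9 or 10 entries. Each entry
   is 2 bytes for a 16-bit gate and 4 bytes for a 32-bit gate, hence the shift. */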
3653 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3654 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3655 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3656 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3657
3658 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3659 {
3660 if ( uNewEsp - 1 > cbLimitSS
3661 || uNewEsp < cbStackFrame)
3662 {
3663 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3664 u8Vector, NewSS, uNewEsp, cbStackFrame));
3665 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3666 }
3667 }
3668 else
3669 {
3670 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3671 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3672 {
3673 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3674 u8Vector, NewSS, uNewEsp, cbStackFrame));
3675 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3676 }
3677 }
3678
3679 /*
3680 * Start making changes.
3681 */
3682
3683 /* Set the new CPL so that stack accesses use it. */
3684 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3685 IEM_SET_CPL(pVCpu, uNewCpl);
3686
3687 /* Create the stack frame. */
3688 uint8_t bUnmapInfoStackFrame;
3689 RTPTRUNION uStackFrame;
3690 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3691 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3692 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3693 if (rcStrict != VINF_SUCCESS)
3694 return rcStrict;
3695 if (f32BitGate)
3696 {
3697 if (fFlags & IEM_XCPT_FLAGS_ERR)
3698 *uStackFrame.pu32++ = uErr;
3699 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3700 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3701 uStackFrame.pu32[2] = fEfl;
3702 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3703 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3704 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3705 if (fEfl & X86_EFL_VM)
3706 {
3707 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3708 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3709 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3710 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3711 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3712 }
3713 }
3714 else
3715 {
3716 if (fFlags & IEM_XCPT_FLAGS_ERR)
3717 *uStackFrame.pu16++ = uErr;
3718 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3719 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3720 uStackFrame.pu16[2] = fEfl;
3721 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3722 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3723 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3724 if (fEfl & X86_EFL_VM)
3725 {
3726 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3727 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3728 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3729 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3730 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3731 }
3732 }
3733 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3734 if (rcStrict != VINF_SUCCESS)
3735 return rcStrict;
3736
3737 /* Mark the selectors 'accessed' (hope this is the correct time). */
3738 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3739 * after pushing the stack frame? (Write protect the gdt + stack to
3740 * find out.) */
3741 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3742 {
3743 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3744 if (rcStrict != VINF_SUCCESS)
3745 return rcStrict;
3746 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3747 }
3748
3749 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3750 {
3751 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3752 if (rcStrict != VINF_SUCCESS)
3753 return rcStrict;
3754 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3755 }
3756
3757 /*
3758 * Start committing the register changes (joins with the DPL=CPL branch).
3759 */
3760 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3761 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3762 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3763 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3764 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3765 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3766 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3767 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3768 * SP is loaded).
3769 * Need to check the other combinations too:
3770 * - 16-bit TSS, 32-bit handler
3771 * - 32-bit TSS, 16-bit handler */
3772 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3773 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3774 else
3775 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3776
3777 if (fEfl & X86_EFL_VM)
3778 {
3779 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3780 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3781 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3782 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3783 }
3784 }
3785 /*
3786 * Same privilege, no stack change and smaller stack frame.
3787 */
3788 else
3789 {
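/* No stack switch, so only EIP, CS and EFLAGS (+ optional error code) are pushed:
   6 or 8 bytes for a 16-bit gate, doubled for a 32-bit gate. */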
3790 uint64_t uNewRsp;
3791 uint8_t bUnmapInfoStackFrame;
3792 RTPTRUNION uStackFrame;
3793 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3794 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3795 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3796 if (rcStrict != VINF_SUCCESS)
3797 return rcStrict;
3798
3799 if (f32BitGate)
3800 {
3801 if (fFlags & IEM_XCPT_FLAGS_ERR)
3802 *uStackFrame.pu32++ = uErr;
3803 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3804 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3805 uStackFrame.pu32[2] = fEfl;
3806 }
3807 else
3808 {
3809 if (fFlags & IEM_XCPT_FLAGS_ERR)
3810 *uStackFrame.pu16++ = uErr;
3811 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3812 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3813 uStackFrame.pu16[2] = fEfl;
3814 }
3815 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3816 if (rcStrict != VINF_SUCCESS)
3817 return rcStrict;
3818
3819 /* Mark the CS selector as 'accessed'. */
3820 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3821 {
3822 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3823 if (rcStrict != VINF_SUCCESS)
3824 return rcStrict;
3825 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3826 }
3827
3828 /*
3829 * Start committing the register changes (joins with the other branch).
3830 */
3831 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3832 }
3833
3834 /* ... register committing continues. */
3835 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3836 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3837 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3838 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3839 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3840 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3841
3842 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3843 fEfl &= ~fEflToClear;
3844 IEMMISC_SET_EFL(pVCpu, fEfl);
3845
3846 if (fFlags & IEM_XCPT_FLAGS_CR2)
3847 pVCpu->cpum.GstCtx.cr2 = uCr2;
3848
3849 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3850 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3851
3852 /* Make sure the execution flags are correct. */
3853 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3854 if (fExecNew != pVCpu->iem.s.fExec)
3855 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3856 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3857 pVCpu->iem.s.fExec = fExecNew;
3858 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3859
3860 /*
3861 * Deal with debug events that follow the exception and clear inhibit flags.
3862 */
3863 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3864 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
3865 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3866 else
3867 {
3868 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
3869 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
3870 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3871 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3872 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
3873 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3874 return iemRaiseDebugException(pVCpu);
3875 }
3876
3877 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3878}
3879
3880
3881/**
3882 * Implements exceptions and interrupts for long mode.
3883 *
3884 * @returns VBox strict status code.
3885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3886 * @param cbInstr The number of bytes to offset rIP by in the return
3887 * address.
3888 * @param u8Vector The interrupt / exception vector number.
3889 * @param fFlags The flags.
3890 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3891 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3892 */
3893static VBOXSTRICTRC
3894iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3895 uint8_t cbInstr,
3896 uint8_t u8Vector,
3897 uint32_t fFlags,
3898 uint16_t uErr,
3899 uint64_t uCr2) RT_NOEXCEPT
3900{
3901 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3902
3903 /*
3904 * Read the IDT entry.
3905 */
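/* In long mode each IDT entry is 16 bytes, hence the vector is scaled by 16 and the
   descriptor is fetched as two 8-byte reads below. */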
3906 uint16_t offIdt = (uint16_t)u8Vector << 4;
3907 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3908 {
3909 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3910 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3911 }
3912 X86DESC64 Idte;
3913#ifdef _MSC_VER /* Shut up silly compiler warning. */
3914 Idte.au64[0] = 0;
3915 Idte.au64[1] = 0;
3916#endif
3917 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3918 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3919 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3920 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3921 {
3922 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3923 return rcStrict;
3924 }
3925 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3926 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3927 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3928
3929 /*
3930 * Check the descriptor type, DPL and such.
3931 * ASSUMES this is done in the same order as described for call-gate calls.
3932 */
3933 if (Idte.Gate.u1DescType)
3934 {
3935 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3936 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3937 }
3938 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3939 switch (Idte.Gate.u4Type)
3940 {
3941 case AMD64_SEL_TYPE_SYS_INT_GATE:
3942 fEflToClear |= X86_EFL_IF;
3943 break;
3944 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3945 break;
3946
3947 default:
3948 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3949 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3950 }
3951
3952 /* Check DPL against CPL if applicable. */
3953 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3954 {
3955 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3956 {
3957 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3958 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3959 }
3960 }
3961
3962 /* Is it there? */
3963 if (!Idte.Gate.u1Present)
3964 {
3965 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3966 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3967 }
3968
3969 /* A null CS is bad. */
3970 RTSEL NewCS = Idte.Gate.u16Sel;
3971 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3972 {
3973 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3974 return iemRaiseGeneralProtectionFault0(pVCpu);
3975 }
3976
3977 /* Fetch the descriptor for the new CS. */
3978 IEMSELDESC DescCS;
3979 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3980 if (rcStrict != VINF_SUCCESS)
3981 {
3982 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3983 return rcStrict;
3984 }
3985
3986 /* Must be a 64-bit code segment. */
3987 if (!DescCS.Long.Gen.u1DescType)
3988 {
3989 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3990 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3991 }
3992 if ( !DescCS.Long.Gen.u1Long
3993 || DescCS.Long.Gen.u1DefBig
3994 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3995 {
3996 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3997 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3998 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3999 }
4000
4001 /* Don't allow lowering the privilege level. For non-conforming CS
4002 selectors, the CS.DPL sets the privilege level the trap/interrupt
4003 handler runs at. For conforming CS selectors, the CPL remains
4004 unchanged, but the CS.DPL must be <= CPL. */
4005 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4006 * when CPU in Ring-0. Result \#GP? */
4007 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
4008 {
4009 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4010 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4011 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4012 }
4013
4014
4015 /* Make sure the selector is present. */
4016 if (!DescCS.Legacy.Gen.u1Present)
4017 {
4018 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4019 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4020 }
4021
4022 /* Check that the new RIP is canonical. */
4023 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4024 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4025 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4026 if (!IEM_IS_CANONICAL(uNewRip))
4027 {
4028 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4029 return iemRaiseGeneralProtectionFault0(pVCpu);
4030 }
4031
4032 /*
4033 * If the privilege level changes or if the IST isn't zero, we need to get
4034 * a new stack from the TSS.
4035 */
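/* A non-zero IST field unconditionally selects one of the seven interrupt stacks in the
   64-bit TSS, even when the CPL does not change. */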
4036 uint64_t uNewRsp;
4037 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4038 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
4039 if ( uNewCpl != IEM_GET_CPL(pVCpu)
4040 || Idte.Gate.u3IST != 0)
4041 {
4042 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4043 if (rcStrict != VINF_SUCCESS)
4044 return rcStrict;
4045 }
4046 else
4047 uNewRsp = pVCpu->cpum.GstCtx.rsp;
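/* In 64-bit mode the CPU aligns the stack pointer to a 16-byte boundary before pushing the
   interrupt stack frame (see the Intel/AMD descriptions of the 64-bit IDT mechanism). */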
4048 uNewRsp &= ~(uint64_t)0xf;
4049
4050 /*
4051 * Calc the flag image to push.
4052 */
4053 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4054 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4055 fEfl &= ~X86_EFL_RF;
4056 else
4057 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4058
4059 /*
4060 * Start making changes.
4061 */
4062 /* Set the new CPL so that stack accesses use it. */
4063 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
4064 IEM_SET_CPL(pVCpu, uNewCpl);
4065/** @todo Setting CPL this early seems wrong as it would affect any errors we
4066 * raise accessing the stack and (?) GDT/LDT... */
4067
4068 /* Create the stack frame. */
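/* The 64-bit frame is always SS, RSP, RFLAGS, CS, RIP (+ optional error code), i.e. five or
   six quadwords, regardless of the interrupted code's bitness. */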
4069 uint8_t bUnmapInfoStackFrame;
4070 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4071 RTPTRUNION uStackFrame;
4072 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4073 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4074 if (rcStrict != VINF_SUCCESS)
4075 return rcStrict;
4076
4077 if (fFlags & IEM_XCPT_FLAGS_ERR)
4078 *uStackFrame.pu64++ = uErr;
4079 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4080 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4081 uStackFrame.pu64[2] = fEfl;
4082 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4083 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4084 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4085 if (rcStrict != VINF_SUCCESS)
4086 return rcStrict;
4087
4088 /* Mark the CS selector 'accessed' (hope this is the correct time). */
4089 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4090 * after pushing the stack frame? (Write protect the gdt + stack to
4091 * find out.) */
4092 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4093 {
4094 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4095 if (rcStrict != VINF_SUCCESS)
4096 return rcStrict;
4097 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4098 }
4099
4100 /*
4101 * Start committing the register changes.
4102 */
4103 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
4104 * hidden registers when interrupting 32-bit or 16-bit code! */
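/* On a CPL change in long mode SS is loaded with a NULL selector whose RPL equals the new
   CPL; a NULL SS is legal in 64-bit mode. */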
4105 if (uNewCpl != uOldCpl)
4106 {
4107 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4108 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4109 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4110 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4111 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4112 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4113 }
4114 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4115 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4116 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4117 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4118 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4119 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4120 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4121 pVCpu->cpum.GstCtx.rip = uNewRip;
4122
4123 fEfl &= ~fEflToClear;
4124 IEMMISC_SET_EFL(pVCpu, fEfl);
4125
4126 if (fFlags & IEM_XCPT_FLAGS_CR2)
4127 pVCpu->cpum.GstCtx.cr2 = uCr2;
4128
4129 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4130 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4131
4132 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4133
4134 /*
4135 * Deal with debug events that follow the exception and clear inhibit flags.
4136 */
4137 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4138 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4139 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4140 else
4141 {
4142 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4143 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4144 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4145 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4146 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4147 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4148 return iemRaiseDebugException(pVCpu);
4149 }
4150
4151 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4152}
4153
4154
4155/**
4156 * Implements exceptions and interrupts.
4157 *
4158 * All exceptions and interrupts go thru this function!
4159 *
4160 * @returns VBox strict status code.
4161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4162 * @param cbInstr The number of bytes to offset rIP by in the return
4163 * address.
4164 * @param u8Vector The interrupt / exception vector number.
4165 * @param fFlags The flags.
4166 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4167 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4168 */
4169VBOXSTRICTRC
4170iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4171 uint8_t cbInstr,
4172 uint8_t u8Vector,
4173 uint32_t fFlags,
4174 uint16_t uErr,
4175 uint64_t uCr2) RT_NOEXCEPT
4176{
4177 /*
4178 * Get all the state that we might need here.
4179 */
4180 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4181 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4182
4183#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4184 /*
4185 * Flush prefetch buffer
4186 */
4187 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4188#endif
4189
4190 /*
4191 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4192 */
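/* INT n executed in V8086 mode with IOPL < 3 raises #GP(0) instead of vectoring through the
   IDT (CR4.VME redirection aside); INT3, INTO and ICEBP are exempt from this IOPL check,
   which is what the flag mask below encodes. */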
4193 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4194 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4195 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4196 | IEM_XCPT_FLAGS_BP_INSTR
4197 | IEM_XCPT_FLAGS_ICEBP_INSTR
4198 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4199 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4200 {
4201 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4202 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4203 u8Vector = X86_XCPT_GP;
4204 uErr = 0;
4205 }
4206
4207 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4208#ifdef DBGFTRACE_ENABLED
4209 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4210 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4211 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4212#endif
4213
4214 /*
4215 * Check if DBGF wants to intercept the exception.
4216 */
4217 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4218 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4219 { /* likely */ }
4220 else
4221 {
4222 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4223 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4224 if (rcStrict != VINF_SUCCESS)
4225 return rcStrict;
4226 }
4227
4228 /*
4229 * Evaluate whether NMI blocking should be in effect.
4230 * Normally, NMI blocking is in effect whenever we inject an NMI.
4231 */
4232 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4233 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4234
4235#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4236 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4237 {
4238 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4239 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4240 return rcStrict0;
4241
4242 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4243 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4244 {
4245 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4246 fBlockNmi = false;
4247 }
4248 }
4249#endif
4250
4251#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4252 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4253 {
4254 /*
4255 * If the event is being injected as part of VMRUN, it isn't subject to event
4256 * intercepts in the nested-guest. However, secondary exceptions that occur
4257 * during injection of any event -are- subject to exception intercepts.
4258 *
4259 * See AMD spec. 15.20 "Event Injection".
4260 */
4261 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4262 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4263 else
4264 {
4265 /*
4266 * Check and handle if the event being raised is intercepted.
4267 */
4268 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4269 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4270 return rcStrict0;
4271 }
4272 }
4273#endif
4274
4275 /*
4276 * Set NMI blocking if necessary.
4277 */
4278 if (fBlockNmi)
4279 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4280
4281 /*
4282 * Do recursion accounting.
4283 */
4284 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4285 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4286 if (pVCpu->iem.s.cXcptRecursions == 0)
4287 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4288 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4289 else
4290 {
4291 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4292 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4293 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4294
4295 if (pVCpu->iem.s.cXcptRecursions >= 4)
4296 {
4297#ifdef DEBUG_bird
4298 AssertFailed();
4299#endif
4300 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4301 }
4302
4303 /*
4304 * Evaluate the sequence of recurring events.
4305 */
4306 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4307 NULL /* pXcptRaiseInfo */);
4308 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4309 { /* likely */ }
4310 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4311 {
4312 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4313 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4314 u8Vector = X86_XCPT_DF;
4315 uErr = 0;
4316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4317 /* VMX nested-guest #DF intercept needs to be checked here. */
4318 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4319 {
4320 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4321 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4322 return rcStrict0;
4323 }
4324#endif
4325 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4326 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4327 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4328 }
4329 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4330 {
4331 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4332 return iemInitiateCpuShutdown(pVCpu);
4333 }
4334 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4335 {
4336 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4337 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4338 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4339 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4340 return VERR_EM_GUEST_CPU_HANG;
4341 }
4342 else
4343 {
4344 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4345 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4346 return VERR_IEM_IPE_9;
4347 }
4348
4349 /*
4350 * The 'EXT' bit is set when an exception occurs during delivery of an external
4351 * event (such as an interrupt or earlier exception)[1]. The privileged software
4352 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by software
4353 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
4354 *
4355 * [1] - Intel spec. 6.13 "Error Code"
4356 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4357 * [3] - Intel Instruction reference for INT n.
4358 */
4359 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4360 && (fFlags & IEM_XCPT_FLAGS_ERR)
4361 && u8Vector != X86_XCPT_PF
4362 && u8Vector != X86_XCPT_DF)
4363 {
4364 uErr |= X86_TRAP_ERR_EXTERNAL;
4365 }
4366 }
4367
4368 pVCpu->iem.s.cXcptRecursions++;
4369 pVCpu->iem.s.uCurXcpt = u8Vector;
4370 pVCpu->iem.s.fCurXcpt = fFlags;
4371 pVCpu->iem.s.uCurXcptErr = uErr;
4372 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4373
4374 /*
4375 * Extensive logging.
4376 */
4377#if defined(LOG_ENABLED) && defined(IN_RING3)
4378 if (LogIs3Enabled())
4379 {
4380 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4381 char szRegs[4096];
4382 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4383 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4384 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4385 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4386 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4387 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4388 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4389 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4390 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4391 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4392 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4393 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4394 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4395 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4396 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4397 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4398 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4399 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4400 " efer=%016VR{efer}\n"
4401 " pat=%016VR{pat}\n"
4402 " sf_mask=%016VR{sf_mask}\n"
4403 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4404 " lstar=%016VR{lstar}\n"
4405 " star=%016VR{star} cstar=%016VR{cstar}\n"
4406 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4407 );
4408
4409 char szInstr[256];
4410 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4411 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4412 szInstr, sizeof(szInstr), NULL);
4413 Log3(("%s%s\n", szRegs, szInstr));
4414 }
4415#endif /* LOG_ENABLED */
4416
4417 /*
4418 * Stats.
4419 */
4420 uint64_t const uTimestamp = ASMReadTSC();
4421 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4422 {
4423 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
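        /* Record the exit in the EM history; software interrupt vectors get 0x100
           added so their exits can be told apart from external interrupt exits. */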
4424 EMHistoryAddExit(pVCpu,
4425 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4426 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4427 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4428 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4429 }
4430 else
4431 {
4432 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4433 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4434 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4435 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4436 if (fFlags & IEM_XCPT_FLAGS_ERR)
4437 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4438 if (fFlags & IEM_XCPT_FLAGS_CR2)
4439 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4440 }
4441
4442 /*
4443 * Hack alert! Convert incoming debug events to silent ones on Intel.
4444 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4445 */
4446 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4447 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4448 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4449 { /* ignore */ }
4450 else
4451 {
4452 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4453 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4454 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4455 | CPUMCTX_DBG_HIT_DRX_SILENT;
4456 }
4457
4458 /*
4459 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4460 * to ensure that a stale TLB or paging cache entry will only cause one
4461 * spurious #PF.
4462 */
4463 if ( u8Vector == X86_XCPT_PF
4464 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4465 IEMTlbInvalidatePage(pVCpu, uCr2);
4466
4467 /*
4468 * Call the mode specific worker function.
4469 */
4470 VBOXSTRICTRC rcStrict;
4471 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4472 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4473 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4474 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4475 else
4476 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4477
4478 /* Flush the prefetch buffer. */
4479 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4480
4481 /*
4482 * Unwind.
4483 */
4484 pVCpu->iem.s.cXcptRecursions--;
4485 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4486 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4487 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4488 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4489 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4490 return rcStrict;
4491}
4492
4493#ifdef IEM_WITH_SETJMP
4494/**
4495 * See iemRaiseXcptOrInt. Will not return.
4496 */
4497DECL_NO_RETURN(void)
4498iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4499 uint8_t cbInstr,
4500 uint8_t u8Vector,
4501 uint32_t fFlags,
4502 uint16_t uErr,
4503 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4504{
4505 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4506 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4507}
4508#endif
4509
4510
4511/** \#DE - 00. */
4512VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4513{
4514 if (GCMIsInterceptingXcptDE(pVCpu))
4515 {
4516 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4517 if (rc == VINF_SUCCESS)
4518 {
4519 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4520 return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4521 }
4522 }
4523 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4524}
4525
4526
4527#ifdef IEM_WITH_SETJMP
4528/** \#DE - 00. */
4529DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4530{
4531 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4532}
4533#endif
4534
4535
4536/** \#DB - 01.
4537 * @note This automatically clears DR7.GD. */
4538VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4539{
4540 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4541 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4542 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4543}
4544
4545
4546/** \#BR - 05. */
4547VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4548{
4549 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4550}
4551
4552
4553/** \#UD - 06. */
4554VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4555{
4556 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4557}
4558
4559
4560#ifdef IEM_WITH_SETJMP
4561/** \#UD - 06. */
4562DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4563{
4564 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4565}
4566#endif
4567
4568
4569/** \#NM - 07. */
4570VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4571{
4572 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4573}
4574
4575
4576#ifdef IEM_WITH_SETJMP
4577/** \#NM - 07. */
4578DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4579{
4580 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4581}
4582#endif
4583
4584
4585/** \#TS(err) - 0a. */
4586VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4587{
4588 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4589}
4590
4591
4592/** \#TS(tr) - 0a. */
4593VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4594{
4595 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4596 pVCpu->cpum.GstCtx.tr.Sel, 0);
4597}
4598
4599
4600/** \#TS(0) - 0a. */
4601VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4602{
4603 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4604 0, 0);
4605}
4606
4607
4608/** \#TS(sel) - 0a. */
4609VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4610{
4611 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4612 uSel & X86_SEL_MASK_OFF_RPL, 0);
4613}
4614
4615
4616/** \#NP(err) - 0b. */
4617VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4618{
4619 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4620}
4621
4622
4623/** \#NP(sel) - 0b. */
4624VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4625{
4626 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4627 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4628 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4629 uSel & ~X86_SEL_RPL, 0);
4630}
4631
4632
4633/** \#SS(seg) - 0c. */
4634VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4635{
4636 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4637 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4638 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4639 uSel & ~X86_SEL_RPL, 0);
4640}
4641
4642
4643/** \#SS(err) - 0c. */
4644VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4645{
4646 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4647 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4648 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4649}
4650
4651
4652/** \#GP(n) - 0d. */
4653VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4654{
4655 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4656 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4657}
4658
4659
4660/** \#GP(0) - 0d. */
4661VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4662{
4663 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4664 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4665}
4666
4667#ifdef IEM_WITH_SETJMP
4668/** \#GP(0) - 0d. */
4669DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4670{
4671 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4672 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4673}
4674#endif
4675
4676
4677/** \#GP(sel) - 0d. */
4678VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4679{
4680 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4681 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4682 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4683 Sel & ~X86_SEL_RPL, 0);
4684}
4685
4686
4687/** \#GP(0) - 0d. */
4688VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4689{
4690 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4691 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4692}
4693
4694
4695/** \#GP(sel) - 0d. */
4696VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4697{
4698 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4699 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4700 NOREF(iSegReg); NOREF(fAccess);
4701 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4702 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4703}
4704
4705#ifdef IEM_WITH_SETJMP
4706/** \#GP(sel) - 0d, longjmp. */
4707DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4708{
4709 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4710 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4711 NOREF(iSegReg); NOREF(fAccess);
4712 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4713 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4714}
4715#endif
4716
4717/** \#GP(sel) - 0d. */
4718VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4719{
4720 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4721 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4722 NOREF(Sel);
4723 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4724}
4725
4726#ifdef IEM_WITH_SETJMP
4727/** \#GP(sel) - 0d, longjmp. */
4728DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4729{
4730 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4731 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4732 NOREF(Sel);
4733 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4734}
4735#endif
4736
4737
4738/** \#GP(sel) - 0d. */
4739VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4740{
4741 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4742 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4743 NOREF(iSegReg); NOREF(fAccess);
4744 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4745}
4746
4747#ifdef IEM_WITH_SETJMP
4748/** \#GP(sel) - 0d, longjmp. */
4749DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4750{
4751 NOREF(iSegReg); NOREF(fAccess);
4752 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4753}
4754#endif
4755
4756
4757/** \#PF(n) - 0e. */
4758VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4759{
4760 uint16_t uErr;
4761 switch (rc)
4762 {
4763 case VERR_PAGE_NOT_PRESENT:
4764 case VERR_PAGE_TABLE_NOT_PRESENT:
4765 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4766 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4767 uErr = 0;
4768 break;
4769
4770 case VERR_RESERVED_PAGE_TABLE_BITS:
4771 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4772 break;
4773
4774 default:
4775 AssertMsgFailed(("%Rrc\n", rc));
4776 RT_FALL_THRU();
4777 case VERR_ACCESS_DENIED:
4778 uErr = X86_TRAP_PF_P;
4779 break;
4780 }
4781
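    /* Accesses made while CPL is 3 set the user-mode (U/S) bit in the error code. */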
4782 if (IEM_GET_CPL(pVCpu) == 3)
4783 uErr |= X86_TRAP_PF_US;
4784
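    /* Flag instruction fetches with the I/D bit, but only when PAE paging and
       EFER.NXE are enabled, i.e. when the NX bit is actually present in the
       page tables. */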
4785 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4786 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4787 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4788 uErr |= X86_TRAP_PF_ID;
4789
4790#if 0 /* This is so much non-sense, really. Why was it done like that? */
4791 /* Note! RW access callers reporting a WRITE protection fault, will clear
4792 the READ flag before calling. So, read-modify-write accesses (RW)
4793 can safely be reported as READ faults. */
4794 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4795 uErr |= X86_TRAP_PF_RW;
4796#else
4797 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4798 {
4799 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4800 /// (regardless of outcome of the comparison in the latter case).
4801 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4802 uErr |= X86_TRAP_PF_RW;
4803 }
4804#endif
4805
4806 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4807 of the memory operand rather than at the start of it. (Not sure what
4808 happens if it crosses a page boundary.) The current heuristic for
4809 this is to report the #PF for the last byte if the access is more than
4810 64 bytes. This is probably not correct, but we can work that out later;
4811 the main objective now is to get FXSAVE to work like on real hardware and
4812 make bs3-cpu-basic2 work. */
4813 if (cbAccess <= 64)
4814 { /* likely*/ }
4815 else
4816 GCPtrWhere += cbAccess - 1;
4817
4818 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4819 uErr, GCPtrWhere);
4820}
4821
4822#ifdef IEM_WITH_SETJMP
4823/** \#PF(n) - 0e, longjmp. */
4824DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4825 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4826{
4827 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4828}
4829#endif
4830
4831
4832/** \#MF(0) - 10. */
4833VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4834{
4835 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4836 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4837
4838 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4839 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4840 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4841}
4842
4843#ifdef IEM_WITH_SETJMP
4844/** \#MF(0) - 10, longjmp. */
4845DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4846{
4847 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4848}
4849#endif
4850
4851
4852/** \#AC(0) - 11. */
4853VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4854{
4855 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4856}
4857
4858#ifdef IEM_WITH_SETJMP
4859/** \#AC(0) - 11, longjmp. */
4860DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4861{
4862 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4863}
4864#endif
4865
4866
4867/** \#XF(0)/\#XM(0) - 19. */
4868VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4869{
4870 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4871}
4872
4873
4874#ifdef IEM_WITH_SETJMP
4875/** \#XF(0)/\#XM(0) - 19, longjmp. */
4876DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4877{
4878 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4879}
4880#endif
4881
4882
4883/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4884IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4885{
4886 NOREF(cbInstr);
4887 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4888}
4889
4890
4891/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4892IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4893{
4894 NOREF(cbInstr);
4895 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4896}
4897
4898
4899/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4900IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4901{
4902 NOREF(cbInstr);
4903 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4904}
4905
4906
4907/** @} */
4908
4909/** @name Common opcode decoders.
4910 * @{
4911 */
4912//#include <iprt/mem.h>
4913
4914/**
4915 * Used to add extra details about a stub case.
4916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4917 */
4918void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4919{
4920#if defined(LOG_ENABLED) && defined(IN_RING3)
4921 PVM pVM = pVCpu->CTX_SUFF(pVM);
4922 char szRegs[4096];
4923 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4924 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4925 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4926 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4927 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4928 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4929 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4930 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4931 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4932 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4933 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4934 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4935 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4936 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4937 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4938 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4939 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4940 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4941 " efer=%016VR{efer}\n"
4942 " pat=%016VR{pat}\n"
4943 " sf_mask=%016VR{sf_mask}\n"
4944 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4945 " lstar=%016VR{lstar}\n"
4946 " star=%016VR{star} cstar=%016VR{cstar}\n"
4947 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4948 );
4949
4950 char szInstr[256];
4951 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4952 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4953 szInstr, sizeof(szInstr), NULL);
4954
4955 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4956#else
4957 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4958#endif
4959}
4960
4961/** @} */
4962
4963
4964
4965/** @name Register Access.
4966 * @{
4967 */
4968
4969/**
4970 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4971 *
4972 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4973 * segment limit.
4974 *
4975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4976 * @param cbInstr Instruction size.
4977 * @param offNextInstr The offset of the next instruction.
4978 * @param enmEffOpSize Effective operand size.
4979 */
4980VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4981 IEMMODE enmEffOpSize) RT_NOEXCEPT
4982{
4983 switch (enmEffOpSize)
4984 {
4985 case IEMMODE_16BIT:
4986 {
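            /* Note: the 16-bit addition wraps at 64KiB; only the resulting IP is
               checked against the CS limit. */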
4987 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4988 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4989 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4990 pVCpu->cpum.GstCtx.rip = uNewIp;
4991 else
4992 return iemRaiseGeneralProtectionFault0(pVCpu);
4993 break;
4994 }
4995
4996 case IEMMODE_32BIT:
4997 {
4998 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4999 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
5000
5001 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
5002 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5003 pVCpu->cpum.GstCtx.rip = uNewEip;
5004 else
5005 return iemRaiseGeneralProtectionFault0(pVCpu);
5006 break;
5007 }
5008
5009 case IEMMODE_64BIT:
5010 {
5011 Assert(IEM_IS_64BIT_CODE(pVCpu));
5012
5013 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5014 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5015 pVCpu->cpum.GstCtx.rip = uNewRip;
5016 else
5017 return iemRaiseGeneralProtectionFault0(pVCpu);
5018 break;
5019 }
5020
5021 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5022 }
5023
5024#ifndef IEM_WITH_CODE_TLB
5025 /* Flush the prefetch buffer. */
5026 pVCpu->iem.s.cbOpcode = cbInstr;
5027#endif
5028
5029 /*
5030 * Clear RF and finish the instruction (maybe raise #DB).
5031 */
5032 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5033}
5034
5035
5036/**
5037 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5038 *
5039 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5040 * segment limit.
5041 *
5042 * @returns Strict VBox status code.
5043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5044 * @param cbInstr Instruction size.
5045 * @param offNextInstr The offset of the next instruction.
5046 */
5047VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
5048{
5049 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5050
5051 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
5052 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5053 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
5054 pVCpu->cpum.GstCtx.rip = uNewIp;
5055 else
5056 return iemRaiseGeneralProtectionFault0(pVCpu);
5057
5058#ifndef IEM_WITH_CODE_TLB
5059 /* Flush the prefetch buffer. */
5060 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5061#endif
5062
5063 /*
5064 * Clear RF and finish the instruction (maybe raise #DB).
5065 */
5066 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5067}
5068
5069
5070/**
5071 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5072 *
5073 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5074 * segment limit.
5075 *
5076 * @returns Strict VBox status code.
5077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5078 * @param cbInstr Instruction size.
5079 * @param offNextInstr The offset of the next instruction.
5080 * @param enmEffOpSize Effective operand size.
5081 */
5082VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5083 IEMMODE enmEffOpSize) RT_NOEXCEPT
5084{
5085 if (enmEffOpSize == IEMMODE_32BIT)
5086 {
5087 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
5088
5089 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
5090 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5091 pVCpu->cpum.GstCtx.rip = uNewEip;
5092 else
5093 return iemRaiseGeneralProtectionFault0(pVCpu);
5094 }
5095 else
5096 {
5097 Assert(enmEffOpSize == IEMMODE_64BIT);
5098
5099 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5100 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5101 pVCpu->cpum.GstCtx.rip = uNewRip;
5102 else
5103 return iemRaiseGeneralProtectionFault0(pVCpu);
5104 }
5105
5106#ifndef IEM_WITH_CODE_TLB
5107 /* Flush the prefetch buffer. */
5108 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5109#endif
5110
5111 /*
5112 * Clear RF and finish the instruction (maybe raise #DB).
5113 */
5114 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5115}
5116
5117/** @} */
5118
5119
5120/** @name FPU access and helpers.
5121 *
5122 * @{
5123 */
5124
5125/**
5126 * Updates the x87.DS and FPUDP registers.
5127 *
5128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5129 * @param pFpuCtx The FPU context.
5130 * @param iEffSeg The effective segment register.
5131 * @param GCPtrEff The effective address relative to @a iEffSeg.
5132 */
5133DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5134{
5135 RTSEL sel;
5136 switch (iEffSeg)
5137 {
5138 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
5139 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
5140 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
5141 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
5142 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
5143 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
5144 default:
5145 AssertMsgFailed(("%d\n", iEffSeg));
5146 sel = pVCpu->cpum.GstCtx.ds.Sel;
5147 }
5148 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5149 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5150 {
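        /* In real and V86 mode the selector:offset pair is folded into a linear
           address (selector << 4 plus offset) and the DS field is left zero. */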
5151 pFpuCtx->DS = 0;
5152 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5153 }
5154 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5155 {
5156 pFpuCtx->DS = sel;
5157 pFpuCtx->FPUDP = GCPtrEff;
5158 }
5159 else
5160 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5161}
5162
5163
5164/**
5165 * Rotates the stack registers in the push direction.
5166 *
5167 * @param pFpuCtx The FPU context.
5168 * @remarks This is a complete waste of time, but fxsave stores the registers in
5169 * stack order.
5170 */
5171DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5172{
5173 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5174 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5175 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5176 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5177 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5178 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5179 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5180 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5181 pFpuCtx->aRegs[0].r80 = r80Tmp;
5182}
5183
5184
5185/**
5186 * Rotates the stack registers in the pop direction.
5187 *
5188 * @param pFpuCtx The FPU context.
5189 * @remarks This is a complete waste of time, but fxsave stores the registers in
5190 * stack order.
5191 */
5192DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5193{
5194 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5195 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5196 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5197 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5198 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5199 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5200 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5201 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5202 pFpuCtx->aRegs[7].r80 = r80Tmp;
5203}
5204
5205
5206/**
5207 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5208 * exception prevents it.
5209 *
5210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5211 * @param pResult The FPU operation result to push.
5212 * @param pFpuCtx The FPU context.
5213 */
5214static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5215{
5216 /* Update FSW and bail if there are pending exceptions afterwards. */
5217 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5218 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5219 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5220 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5221 {
5222 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5223 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5224 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5225 pFpuCtx->FSW = fFsw;
5226 return;
5227 }
5228
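    /* A push decrements TOP; adding 7 modulo 8 subtracts one in the 3-bit TOP field. */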
5229 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5230 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5231 {
5232 /* All is fine, push the actual value. */
5233 pFpuCtx->FTW |= RT_BIT(iNewTop);
5234 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5235 }
5236 else if (pFpuCtx->FCW & X86_FCW_IM)
5237 {
5238 /* Masked stack overflow, push QNaN. */
5239 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5240 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5241 }
5242 else
5243 {
5244 /* Raise stack overflow, don't push anything. */
5245 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5246 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5247 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5248 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5249 return;
5250 }
5251
5252 fFsw &= ~X86_FSW_TOP_MASK;
5253 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5254 pFpuCtx->FSW = fFsw;
5255
5256 iemFpuRotateStackPush(pFpuCtx);
5257 RT_NOREF(pVCpu);
5258}
5259
5260
5261/**
5262 * Stores a result in a FPU register and updates the FSW and FTW.
5263 *
5264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5265 * @param pFpuCtx The FPU context.
5266 * @param pResult The result to store.
5267 * @param iStReg Which FPU register to store it in.
5268 */
5269static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5270{
5271 Assert(iStReg < 8);
5272 uint16_t fNewFsw = pFpuCtx->FSW;
5273 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
5274 fNewFsw &= ~X86_FSW_C_MASK;
5275 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5276 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5277 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5278 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5279 pFpuCtx->FSW = fNewFsw;
5280 pFpuCtx->FTW |= RT_BIT(iReg);
5281 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5282 RT_NOREF(pVCpu);
5283}
5284
5285
5286/**
5287 * Only updates the FPU status word (FSW) with the result of the current
5288 * instruction.
5289 *
5290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5291 * @param pFpuCtx The FPU context.
5292 * @param u16FSW The FSW output of the current instruction.
5293 */
5294static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5295{
5296 uint16_t fNewFsw = pFpuCtx->FSW;
5297 fNewFsw &= ~X86_FSW_C_MASK;
5298 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5299 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5300 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5301 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5302 pFpuCtx->FSW = fNewFsw;
5303 RT_NOREF(pVCpu);
5304}
5305
5306
5307/**
5308 * Pops one item off the FPU stack if no pending exception prevents it.
5309 *
5310 * @param pFpuCtx The FPU context.
5311 */
5312static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5313{
5314 /* Check pending exceptions. */
5315 uint16_t uFSW = pFpuCtx->FSW;
5316 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5317 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5318 return;
5319
5320 /* TOP++ (a pop advances the top-of-stack pointer, modulo 8). */
5321 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5322 uFSW &= ~X86_FSW_TOP_MASK;
5323 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5324 pFpuCtx->FSW = uFSW;
5325
5326 /* Mark the previous ST0 as empty. */
5327 iOldTop >>= X86_FSW_TOP_SHIFT;
5328 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5329
5330 /* Rotate the registers. */
5331 iemFpuRotateStackPop(pFpuCtx);
5332}
5333
5334
5335/**
5336 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5337 *
5338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5339 * @param pResult The FPU operation result to push.
5340 * @param uFpuOpcode The FPU opcode value.
5341 */
5342void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5343{
5344 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5345 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5346 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5347}
5348
5349
5350/**
5351 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5352 * and sets FPUDP and FPUDS.
5353 *
5354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5355 * @param pResult The FPU operation result to push.
5356 * @param iEffSeg The effective segment register.
5357 * @param GCPtrEff The effective address relative to @a iEffSeg.
5358 * @param uFpuOpcode The FPU opcode value.
5359 */
5360void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5361 uint16_t uFpuOpcode) RT_NOEXCEPT
5362{
5363 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5364 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5365 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5366 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5367}
5368
5369
5370/**
5371 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5372 * unless a pending exception prevents it.
5373 *
5374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5375 * @param pResult The FPU operation result to store and push.
5376 * @param uFpuOpcode The FPU opcode value.
5377 */
5378void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5379{
5380 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5381 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5382
5383 /* Update FSW and bail if there are pending exceptions afterwards. */
5384 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5385 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5386 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5387 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5388 {
5389 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5390 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5391 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5392 pFpuCtx->FSW = fFsw;
5393 return;
5394 }
5395
5396 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5397 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5398 {
5399 /* All is fine, push the actual value. */
5400 pFpuCtx->FTW |= RT_BIT(iNewTop);
5401 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5402 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5403 }
5404 else if (pFpuCtx->FCW & X86_FCW_IM)
5405 {
5406 /* Masked stack overflow, push QNaN. */
5407 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5408 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5409 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5410 }
5411 else
5412 {
5413 /* Raise stack overflow, don't push anything. */
5414 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5415 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5416 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5417 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5418 return;
5419 }
5420
5421 fFsw &= ~X86_FSW_TOP_MASK;
5422 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5423 pFpuCtx->FSW = fFsw;
5424
5425 iemFpuRotateStackPush(pFpuCtx);
5426}
5427
5428
5429/**
5430 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5431 * FOP.
5432 *
5433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5434 * @param pResult The result to store.
5435 * @param iStReg Which FPU register to store it in.
5436 * @param uFpuOpcode The FPU opcode value.
5437 */
5438void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5439{
5440 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5441 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5442 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5443}
5444
5445
5446/**
5447 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5448 * FOP, and then pops the stack.
5449 *
5450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5451 * @param pResult The result to store.
5452 * @param iStReg Which FPU register to store it in.
5453 * @param uFpuOpcode The FPU opcode value.
5454 */
5455void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5456{
5457 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5458 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5459 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5460 iemFpuMaybePopOne(pFpuCtx);
5461}
5462
5463
5464/**
5465 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5466 * FPUDP, and FPUDS.
5467 *
5468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5469 * @param pResult The result to store.
5470 * @param iStReg Which FPU register to store it in.
5471 * @param iEffSeg The effective memory operand selector register.
5472 * @param GCPtrEff The effective memory operand offset.
5473 * @param uFpuOpcode The FPU opcode value.
5474 */
5475void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5476 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5477{
5478 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5479 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5480 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5481 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5482}
5483
5484
5485/**
5486 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5487 * FPUDP, and FPUDS, and then pops the stack.
5488 *
5489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5490 * @param pResult The result to store.
5491 * @param iStReg Which FPU register to store it in.
5492 * @param iEffSeg The effective memory operand selector register.
5493 * @param GCPtrEff The effective memory operand offset.
5494 * @param uFpuOpcode The FPU opcode value.
5495 */
5496void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5497 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5498{
5499 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5500 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5501 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5502 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5503 iemFpuMaybePopOne(pFpuCtx);
5504}
5505
5506
5507/**
5508 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5509 *
5510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5511 * @param uFpuOpcode The FPU opcode value.
5512 */
5513void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5514{
5515 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5516 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5517}
5518
5519
5520/**
5521 * Updates the FSW, FOP, FPUIP, and FPUCS.
5522 *
5523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5524 * @param u16FSW The FSW from the current instruction.
5525 * @param uFpuOpcode The FPU opcode value.
5526 */
5527void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5528{
5529 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5530 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5531 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5532}
5533
5534
5535/**
5536 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5537 *
5538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5539 * @param u16FSW The FSW from the current instruction.
5540 * @param uFpuOpcode The FPU opcode value.
5541 */
5542void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5543{
5544 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5545 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5546 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5547 iemFpuMaybePopOne(pFpuCtx);
5548}
5549
5550
5551/**
5552 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5553 *
5554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5555 * @param u16FSW The FSW from the current instruction.
5556 * @param iEffSeg The effective memory operand selector register.
5557 * @param GCPtrEff The effective memory operand offset.
5558 * @param uFpuOpcode The FPU opcode value.
5559 */
5560void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5561{
5562 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5563 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5564 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5565 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5566}
5567
5568
5569/**
5570 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5571 *
5572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5573 * @param u16FSW The FSW from the current instruction.
5574 * @param uFpuOpcode The FPU opcode value.
5575 */
5576void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5577{
5578 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5579 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5580 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5581 iemFpuMaybePopOne(pFpuCtx);
5582 iemFpuMaybePopOne(pFpuCtx);
5583}
5584
5585
5586/**
5587 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5588 *
5589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5590 * @param u16FSW The FSW from the current instruction.
5591 * @param iEffSeg The effective memory operand selector register.
5592 * @param GCPtrEff The effective memory operand offset.
5593 * @param uFpuOpcode The FPU opcode value.
5594 */
5595void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5596 uint16_t uFpuOpcode) RT_NOEXCEPT
5597{
5598 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5599 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5600 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5601 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5602 iemFpuMaybePopOne(pFpuCtx);
5603}
5604
5605
5606/**
5607 * Worker routine for raising an FPU stack underflow exception.
5608 *
5609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5610 * @param pFpuCtx The FPU context.
5611 * @param iStReg The stack register being accessed.
5612 */
5613static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5614{
5615 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5616 if (pFpuCtx->FCW & X86_FCW_IM)
5617 {
5618 /* Masked underflow. */
5619 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5620 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5621 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5622 if (iStReg != UINT8_MAX)
5623 {
5624 pFpuCtx->FTW |= RT_BIT(iReg);
5625 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5626 }
5627 }
5628 else
5629 {
5630 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5631 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5632 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5633 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5634 }
5635 RT_NOREF(pVCpu);
5636}
5637
5638
5639/**
5640 * Raises a FPU stack underflow exception.
5641 *
5642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5643 * @param iStReg The destination register that should be loaded
5644 * with QNaN if \#IS is not masked. Specify
5645 * UINT8_MAX if none (like for fcom).
5646 * @param uFpuOpcode The FPU opcode value.
5647 */
5648void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5649{
5650 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5651 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5652 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5653}
5654
5655
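/**
 * Raises a FPU stack underflow exception, also updating FPUDP and FPUDS from
 * the memory operand.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The destination register that should be loaded with QNaN if
 * \#IS is not masked. Specify UINT8_MAX if none.
 * @param iEffSeg The effective memory operand selector register.
 * @param GCPtrEff The effective memory operand offset.
 * @param uFpuOpcode The FPU opcode value.
 */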
5656void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5657{
5658 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5659 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5660 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5661 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5662}
5663
5664
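/**
 * Raises a FPU stack underflow exception and then pops the stack once (the pop
 * is skipped if an unmasked exception is pending).
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The destination register that should be loaded with QNaN if
 * \#IS is not masked. Specify UINT8_MAX if none.
 * @param uFpuOpcode The FPU opcode value.
 */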
5665void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5666{
5667 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5668 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5669 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5670 iemFpuMaybePopOne(pFpuCtx);
5671}
5672
5673
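/**
 * Raises a FPU stack underflow exception, updates FPUDP and FPUDS from the
 * memory operand, and then pops the stack once (the pop is skipped if an
 * unmasked exception is pending).
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The destination register that should be loaded with QNaN if
 * \#IS is not masked. Specify UINT8_MAX if none.
 * @param iEffSeg The effective memory operand selector register.
 * @param GCPtrEff The effective memory operand offset.
 * @param uFpuOpcode The FPU opcode value.
 */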
5674void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5675 uint16_t uFpuOpcode) RT_NOEXCEPT
5676{
5677 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5678 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5679 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5680 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5681 iemFpuMaybePopOne(pFpuCtx);
5682}
5683
5684
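/**
 * Raises a FPU stack underflow exception and then pops the stack twice.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param uFpuOpcode The FPU opcode value.
 */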
5685void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5686{
5687 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5688 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5689 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5690 iemFpuMaybePopOne(pFpuCtx);
5691 iemFpuMaybePopOne(pFpuCtx);
5692}
5693
5694
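/**
 * Raises a FPU stack underflow exception on a push; a QNaN is pushed if the
 * exception is masked, otherwise TOP and the register stack are left untouched.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param uFpuOpcode The FPU opcode value.
 */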
5695void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5696{
5697 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5698 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5699
5700 if (pFpuCtx->FCW & X86_FCW_IM)
5701 {
5702 /* Masked underflow - push a QNaN. */
5703 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5704 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5705 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5706 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5707 pFpuCtx->FTW |= RT_BIT(iNewTop);
5708 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5709 iemFpuRotateStackPush(pFpuCtx);
5710 }
5711 else
5712 {
5713 /* Exception pending - don't change TOP or the register stack. */
5714 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5715 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5716 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5717 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5718 }
5719}
5720
5721
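/**
 * Raises a FPU stack underflow exception on a push of two values; if the
 * exception is masked, ST0 is replaced by a QNaN and a second QNaN is pushed,
 * otherwise TOP and the register stack are left untouched.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param uFpuOpcode The FPU opcode value.
 */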
5722void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5723{
5724 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5725 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5726
5727 if (pFpuCtx->FCW & X86_FCW_IM)
5728 {
5729 /* Masked underflow - push QNaNs. */
5730 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5731 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5732 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5733 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5734 pFpuCtx->FTW |= RT_BIT(iNewTop);
5735 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5736 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5737 iemFpuRotateStackPush(pFpuCtx);
5738 }
5739 else
5740 {
5741 /* Exception pending - don't change TOP or the register stack. */
5742 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5743 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5744 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5745 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5746 }
5747}
5748
5749
5750/**
5751 * Worker routine for raising an FPU stack overflow exception on a push.
5752 *
5753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5754 * @param pFpuCtx The FPU context.
5755 */
5756static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5757{
5758 if (pFpuCtx->FCW & X86_FCW_IM)
5759 {
5760 /* Masked overflow. */
5761 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5762 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5763 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5764 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5765 pFpuCtx->FTW |= RT_BIT(iNewTop);
5766 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5767 iemFpuRotateStackPush(pFpuCtx);
5768 }
5769 else
5770 {
5771 /* Exception pending - don't change TOP or the register stack. */
5772 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5773 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5774 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5775 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5776 }
5777 RT_NOREF(pVCpu);
5778}
5779
5780
5781/**
5782 * Raises a FPU stack overflow exception on a push.
5783 *
5784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5785 * @param uFpuOpcode The FPU opcode value.
5786 */
5787void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5788{
5789 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5790 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5791 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5792}
5793
5794
5795/**
5796 * Raises a FPU stack overflow exception on a push with a memory operand.
5797 *
5798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5799 * @param iEffSeg The effective memory operand selector register.
5800 * @param GCPtrEff The effective memory operand offset.
5801 * @param uFpuOpcode The FPU opcode value.
5802 */
5803void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5804{
5805 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5806 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5807 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5808 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5809}
5810
5811/** @} */
5812
5813
5814/** @name Memory access.
5815 *
5816 * @{
5817 */
5818
5819#undef LOG_GROUP
5820#define LOG_GROUP LOG_GROUP_IEM_MEM
5821
5822/**
5823 * Updates the IEMCPU::cbWritten counter if applicable.
5824 *
5825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5826 * @param fAccess The access being accounted for.
5827 * @param cbMem The access size.
5828 */
5829DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5830{
5831 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5832 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5833 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5834}
5835
5836
5837/**
5838 * Applies the segment limit, base and attributes.
5839 *
5840 * This may raise a \#GP or \#SS.
5841 *
5842 * @returns VBox strict status code.
5843 *
5844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5845 * @param fAccess The kind of access which is being performed.
5846 * @param iSegReg The index of the segment register to apply.
5847 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5848 * TSS, ++).
5849 * @param cbMem The access size.
5850 * @param pGCPtrMem Pointer to the guest memory address to apply
5851 * segmentation to. Input and output parameter.
5852 */
5853VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5854{
5855 if (iSegReg == UINT8_MAX)
5856 return VINF_SUCCESS;
5857
5858 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5859 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5860 switch (IEM_GET_CPU_MODE(pVCpu))
5861 {
5862 case IEMMODE_16BIT:
5863 case IEMMODE_32BIT:
5864 {
5865 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5866 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5867
5868 if ( pSel->Attr.n.u1Present
5869 && !pSel->Attr.n.u1Unusable)
5870 {
5871 Assert(pSel->Attr.n.u1DescType);
5872 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5873 {
5874 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5875 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5876 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5877
5878 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5879 {
5880 /** @todo CPL check. */
5881 }
5882
5883 /*
5884 * There are two kinds of data selectors, normal and expand down.
5885 */
5886 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5887 {
5888 if ( GCPtrFirst32 > pSel->u32Limit
5889 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5890 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5891 }
5892 else
5893 {
5894 /*
5895 * The upper boundary is defined by the B bit, not the G bit!
5896 */
5897 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5898 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5899 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5900 }
5901 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5902 }
5903 else
5904 {
5905 /*
5906 * Code selectors can usually be used to read through them; writing is
5907 * only permitted in real and V8086 mode.
5908 */
5909 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5910 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5911 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5912 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5913 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5914
5915 if ( GCPtrFirst32 > pSel->u32Limit
5916 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5917 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5918
5919 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5920 {
5921 /** @todo CPL check. */
5922 }
5923
5924 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5925 }
5926 }
5927 else
5928 return iemRaiseGeneralProtectionFault0(pVCpu);
5929 return VINF_SUCCESS;
5930 }
5931
5932 case IEMMODE_64BIT:
5933 {
5934 RTGCPTR GCPtrMem = *pGCPtrMem;
5935 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5936 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5937
5938 Assert(cbMem >= 1);
5939 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5940 return VINF_SUCCESS;
5941 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5942 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5943 return iemRaiseGeneralProtectionFault0(pVCpu);
5944 }
5945
5946 default:
5947 AssertFailedReturn(VERR_IEM_IPE_7);
5948 }
5949}
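/*
 * Worked example (added for illustration, not part of the original source):
 * for an expand-down data segment the valid offset range is
 * (u32Limit + 1) .. (B=1 ? 0xffffffff : 0xffff).  With u32Limit=0x0fff and
 * B=1, a 4 byte access at offset 0x1000 passes the checks above
 * (first=0x1000 >= 0x1000, last=0x1003 <= 0xffffffff), while the same access
 * at offset 0x0ffd fails the GCPtrFirst32 < u32Limit + 1 test and raises the
 * selector bounds fault.
 */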
5950
5951
5952/**
 5953 * Translates a virtual address to a physical address and checks if we
5954 * can access the page as specified.
5955 *
5956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5957 * @param GCPtrMem The virtual address.
5958 * @param cbAccess The access size, for raising \#PF correctly for
5959 * FXSAVE and such.
5960 * @param fAccess The intended access.
5961 * @param pGCPhysMem Where to return the physical address.
5962 */
5963VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5964 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5965{
5966 /** @todo Need a different PGM interface here. We're currently using
 5967 * generic / REM interfaces. This won't cut it for R0. */
5968 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5969 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5970 * here. */
5971 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5972 PGMPTWALKFAST WalkFast;
5973 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
5974 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
5975 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
5976 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
5977 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
5978 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
5979 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5980 fQPage |= PGMQPAGE_F_USER_MODE;
5981 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
5982 if (RT_SUCCESS(rc))
5983 {
5984 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
5985
5986 /* If the page is writable and does not have the no-exec bit set, all
5987 access is allowed. Otherwise we'll have to check more carefully... */
5988 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
5989 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
5990 || (WalkFast.fEffective & X86_PTE_RW)
5991 || ( ( IEM_GET_CPL(pVCpu) != 3
5992 || (fAccess & IEM_ACCESS_WHAT_SYS))
5993 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
5994 && ( (WalkFast.fEffective & X86_PTE_US)
5995 || IEM_GET_CPL(pVCpu) != 3
5996 || (fAccess & IEM_ACCESS_WHAT_SYS) )
5997 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
5998 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
5999 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
6000 )
6001 );
6002
6003 /* PGMGstQueryPageFast sets the A & D bits. */
6004 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6005 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
6006
6007 *pGCPhysMem = WalkFast.GCPhys;
6008 return VINF_SUCCESS;
6009 }
6010
6011 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6012 /** @todo Check unassigned memory in unpaged mode. */
6013#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6014 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6015 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6016#endif
6017 *pGCPhysMem = NIL_RTGCPHYS;
6018 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
6019}
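/*
 * Illustrative sketch (added, not part of the original source): how the
 * segmentation and paging checks above are meant to be chained by a caller.
 * The helper name and the ES-relative dword read are made up for the example.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC iemMemExampleTranslateEsDword(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTGCPHYS pGCPhysMem)
{
    /* 1. Apply the ES base, limit and attribute checks; may raise #GP/#SS. */
    VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA,
                                               X86_SREG_ES, sizeof(uint32_t), &GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* 2. Walk the guest page tables and check R/W, U/S and NX; may raise #PF. */
    return iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, sizeof(uint32_t),
                                             IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, pGCPhysMem);
}
#endif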
6020
6021#if 0 /*unused*/
6022/**
6023 * Looks up a memory mapping entry.
6024 *
6025 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6027 * @param pvMem The memory address.
6028 * @param fAccess The access to.
6029 */
6030DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
6031{
6032 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6033 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6034 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
6035 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6036 return 0;
6037 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
6038 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6039 return 1;
6040 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
6041 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6042 return 2;
6043 return VERR_NOT_FOUND;
6044}
6045#endif
6046
6047/**
6048 * Finds a free memmap entry when using iNextMapping doesn't work.
6049 *
6050 * @returns Memory mapping index, 1024 on failure.
6051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6052 */
6053static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
6054{
6055 /*
6056 * The easy case.
6057 */
6058 if (pVCpu->iem.s.cActiveMappings == 0)
6059 {
6060 pVCpu->iem.s.iNextMapping = 1;
6061 return 0;
6062 }
6063
6064 /* There should be enough mappings for all instructions. */
6065 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
6066
6067 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
6068 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6069 return i;
6070
6071 AssertFailedReturn(1024);
6072}
6073
6074
6075/**
6076 * Commits a bounce buffer that needs writing back and unmaps it.
6077 *
6078 * @returns Strict VBox status code.
6079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6080 * @param iMemMap The index of the buffer to commit.
6081 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
6082 * Always false in ring-3, obviously.
6083 */
6084static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
6085{
6086 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6087 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6088#ifdef IN_RING3
6089 Assert(!fPostponeFail);
6090 RT_NOREF_PV(fPostponeFail);
6091#endif
6092
6093 /*
6094 * Do the writing.
6095 */
6096 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6097 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
6098 {
6099 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
6100 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6101 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6102 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6103 {
6104 /*
6105 * Carefully and efficiently dealing with access handler return
 6106 * codes makes this a little bloated.
6107 */
6108 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6109 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6110 pbBuf,
6111 cbFirst,
6112 PGMACCESSORIGIN_IEM);
6113 if (rcStrict == VINF_SUCCESS)
6114 {
6115 if (cbSecond)
6116 {
6117 rcStrict = PGMPhysWrite(pVM,
6118 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6119 pbBuf + cbFirst,
6120 cbSecond,
6121 PGMACCESSORIGIN_IEM);
6122 if (rcStrict == VINF_SUCCESS)
6123 { /* nothing */ }
6124 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6125 {
6126 LogEx(LOG_GROUP_IEM,
6127 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6128 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6129 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6130 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6131 }
6132#ifndef IN_RING3
6133 else if (fPostponeFail)
6134 {
6135 LogEx(LOG_GROUP_IEM,
6136 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6137 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6138 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6139 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6140 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6141 return iemSetPassUpStatus(pVCpu, rcStrict);
6142 }
6143#endif
6144 else
6145 {
6146 LogEx(LOG_GROUP_IEM,
6147 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6148 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6149 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6150 return rcStrict;
6151 }
6152 }
6153 }
6154 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6155 {
6156 if (!cbSecond)
6157 {
6158 LogEx(LOG_GROUP_IEM,
6159 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6160 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6161 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6162 }
6163 else
6164 {
6165 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6166 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6167 pbBuf + cbFirst,
6168 cbSecond,
6169 PGMACCESSORIGIN_IEM);
6170 if (rcStrict2 == VINF_SUCCESS)
6171 {
6172 LogEx(LOG_GROUP_IEM,
6173 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6174 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6175 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6176 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6177 }
6178 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6179 {
6180 LogEx(LOG_GROUP_IEM,
6181 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6182 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6183 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6184 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6185 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6186 }
6187#ifndef IN_RING3
6188 else if (fPostponeFail)
6189 {
6190 LogEx(LOG_GROUP_IEM,
6191 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6192 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6193 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6194 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6195 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6196 return iemSetPassUpStatus(pVCpu, rcStrict);
6197 }
6198#endif
6199 else
6200 {
6201 LogEx(LOG_GROUP_IEM,
6202 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6203 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6204 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6205 return rcStrict2;
6206 }
6207 }
6208 }
6209#ifndef IN_RING3
6210 else if (fPostponeFail)
6211 {
6212 LogEx(LOG_GROUP_IEM,
6213 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6214 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6215 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6216 if (!cbSecond)
6217 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6218 else
6219 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6220 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6221 return iemSetPassUpStatus(pVCpu, rcStrict);
6222 }
6223#endif
6224 else
6225 {
6226 LogEx(LOG_GROUP_IEM,
6227 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6228 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6229 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6230 return rcStrict;
6231 }
6232 }
6233 else
6234 {
6235 /*
6236 * No access handlers, much simpler.
6237 */
6238 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6239 if (RT_SUCCESS(rc))
6240 {
6241 if (cbSecond)
6242 {
6243 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6244 if (RT_SUCCESS(rc))
6245 { /* likely */ }
6246 else
6247 {
6248 LogEx(LOG_GROUP_IEM,
6249 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6250 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6251 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6252 return rc;
6253 }
6254 }
6255 }
6256 else
6257 {
6258 LogEx(LOG_GROUP_IEM,
6259 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6260 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6261 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6262 return rc;
6263 }
6264 }
6265 }
6266
6267#if defined(IEM_LOG_MEMORY_WRITES)
6268 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6269 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6270 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6271 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6272 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6273 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6274
6275 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6276 g_cbIemWrote = cbWrote;
6277 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6278#endif
6279
6280 /*
6281 * Free the mapping entry.
6282 */
6283 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6284 Assert(pVCpu->iem.s.cActiveMappings != 0);
6285 pVCpu->iem.s.cActiveMappings--;
6286 return VINF_SUCCESS;
6287}
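/*
 * Note (added for clarity, not part of the original source): when
 * fPostponeFail is given outside ring-3 and PGMPhysWrite cannot complete, the
 * code above merely tags the mapping with IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND
 * and raises VMCPU_FF_IEM, so the physical write is expected to be redone in
 * ring-3 while the caller sees an informational (pass-up) status rather than
 * a failure.
 */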
6288
6289
6290/**
6291 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6292 */
6293DECL_FORCE_INLINE(uint32_t)
6294iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6295{
6296 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6297 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6298 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6299 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6300}
6301
6302
6303/**
6304 * iemMemMap worker that deals with a request crossing pages.
6305 */
6306static VBOXSTRICTRC
6307iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6308 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6309{
6310 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6311 Assert(cbMem <= GUEST_PAGE_SIZE);
6312
6313 /*
6314 * Do the address translations.
6315 */
6316 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6317 RTGCPHYS GCPhysFirst;
6318 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6319 if (rcStrict != VINF_SUCCESS)
6320 return rcStrict;
6321 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6322
6323 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6324 RTGCPHYS GCPhysSecond;
6325 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6326 cbSecondPage, fAccess, &GCPhysSecond);
6327 if (rcStrict != VINF_SUCCESS)
6328 return rcStrict;
6329 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6330 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6331
6332 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6333
6334 /*
6335 * Check for data breakpoints.
6336 */
6337 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6338 { /* likely */ }
6339 else
6340 {
6341 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6342 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6343 cbSecondPage, fAccess);
6344 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6345 if (fDataBps > 1)
6346 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6347 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6348 }
6349
6350 /*
6351 * Read in the current memory content if it's a read, execute or partial
6352 * write access.
6353 */
6354 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6355
6356 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6357 {
6358 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6359 {
6360 /*
6361 * Must carefully deal with access handler status codes here,
 6362 * which makes the code a bit bloated.
6363 */
6364 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6365 if (rcStrict == VINF_SUCCESS)
6366 {
6367 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6368 if (rcStrict == VINF_SUCCESS)
6369 { /*likely */ }
6370 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6371 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6372 else
6373 {
6374 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6375 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6376 return rcStrict;
6377 }
6378 }
6379 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6380 {
6381 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6382 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6383 {
6384 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6385 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6386 }
6387 else
6388 {
6389 LogEx(LOG_GROUP_IEM,
6390 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6391 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6392 return rcStrict2;
6393 }
6394 }
6395 else
6396 {
 6397 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6398 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6399 return rcStrict;
6400 }
6401 }
6402 else
6403 {
6404 /*
 6405 * No informational status codes here, much more straightforward.
6406 */
6407 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6408 if (RT_SUCCESS(rc))
6409 {
6410 Assert(rc == VINF_SUCCESS);
6411 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6412 if (RT_SUCCESS(rc))
6413 Assert(rc == VINF_SUCCESS);
6414 else
6415 {
6416 LogEx(LOG_GROUP_IEM,
6417 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6418 return rc;
6419 }
6420 }
6421 else
6422 {
6423 LogEx(LOG_GROUP_IEM,
6424 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6425 return rc;
6426 }
6427 }
6428 }
6429#ifdef VBOX_STRICT
6430 else
6431 memset(pbBuf, 0xcc, cbMem);
6432 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6433 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6434#endif
6435 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6436
6437 /*
6438 * Commit the bounce buffer entry.
6439 */
6440 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6442 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6443 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6444 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6445 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6446 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6447 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6448 pVCpu->iem.s.cActiveMappings++;
6449
6450 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6451 *ppvMem = pbBuf;
6452 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6453 return VINF_SUCCESS;
6454}
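/*
 * Worked example (added for illustration, not part of the original source):
 * an 8 byte access at GCPtrFirst = 0x....0ffc with 4 KiB guest pages yields
 * cbFirstPage = 0x1000 - 0xffc = 4 and cbSecondPage = 8 - 4 = 4, so bytes
 * 0..3 are backed by GCPhysFirst and bytes 4..7 by GCPhysSecond through the
 * bounce buffer entry set up above.
 */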
6455
6456
6457/**
 6458 * iemMemMap worker that deals with iemMemPageMap failures.
6459 */
6460static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6461 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6462{
6463 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6464
6465 /*
6466 * Filter out conditions we can handle and the ones which shouldn't happen.
6467 */
6468 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6469 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6470 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6471 {
6472 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6473 return rcMap;
6474 }
6475 pVCpu->iem.s.cPotentialExits++;
6476
6477 /*
6478 * Read in the current memory content if it's a read, execute or partial
6479 * write access.
6480 */
6481 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6482 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6483 {
6484 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6485 memset(pbBuf, 0xff, cbMem);
6486 else
6487 {
6488 int rc;
6489 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6490 {
6491 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6492 if (rcStrict == VINF_SUCCESS)
6493 { /* nothing */ }
6494 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6495 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6496 else
6497 {
6498 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6499 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6500 return rcStrict;
6501 }
6502 }
6503 else
6504 {
6505 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6506 if (RT_SUCCESS(rc))
6507 { /* likely */ }
6508 else
6509 {
6510 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6511 GCPhysFirst, rc));
6512 return rc;
6513 }
6514 }
6515 }
6516 }
6517#ifdef VBOX_STRICT
6518 else
6519 memset(pbBuf, 0xcc, cbMem);
6520#endif
6521#ifdef VBOX_STRICT
6522 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6523 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6524#endif
6525
6526 /*
6527 * Commit the bounce buffer entry.
6528 */
6529 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6530 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6531 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6532 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6533 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6534 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6535 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6536 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6537 pVCpu->iem.s.cActiveMappings++;
6538
6539 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6540 *ppvMem = pbBuf;
6541 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6542 return VINF_SUCCESS;
6543}
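/*
 * Note (added for clarity, not part of the original source): for
 * VERR_PGM_PHYS_TLB_UNASSIGNED the buffer is pre-filled with 0xff so guest
 * reads behave like reads from unassigned physical memory, and the
 * fUnassigned flag set above makes iemMemBounceBufferCommitAndUnmap skip the
 * write-back entirely, i.e. guest writes to such ranges are dropped.
 */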
6544
6545
6546
6547/**
6548 * Maps the specified guest memory for the given kind of access.
6549 *
6550 * This may be using bounce buffering of the memory if it's crossing a page
6551 * boundary or if there is an access handler installed for any of it. Because
6552 * of lock prefix guarantees, we're in for some extra clutter when this
6553 * happens.
6554 *
6555 * This may raise a \#GP, \#SS, \#PF or \#AC.
6556 *
6557 * @returns VBox strict status code.
6558 *
6559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6560 * @param ppvMem Where to return the pointer to the mapped memory.
6561 * @param pbUnmapInfo Where to return unmap info to be passed to
6562 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6563 * done.
6564 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6565 * 8, 12, 16, 32 or 512. When used by string operations
6566 * it can be up to a page.
6567 * @param iSegReg The index of the segment register to use for this
6568 * access. The base and limits are checked. Use UINT8_MAX
6569 * to indicate that no segmentation is required (for IDT,
6570 * GDT and LDT accesses).
6571 * @param GCPtrMem The address of the guest memory.
6572 * @param fAccess How the memory is being accessed. The
6573 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6574 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6575 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6576 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6577 * set.
6578 * @param uAlignCtl Alignment control:
6579 * - Bits 15:0 is the alignment mask.
6580 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6581 * IEM_MEMMAP_F_ALIGN_SSE, and
6582 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6583 * Pass zero to skip alignment.
6584 */
6585VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6586 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6587{
6588 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6589
6590 /*
6591 * Check the input and figure out which mapping entry to use.
6592 */
6593 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6594 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6595 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6596 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6597 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6598
6599 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6600 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6601 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6602 {
6603 iMemMap = iemMemMapFindFree(pVCpu);
6604 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6605 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6606 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6607 pVCpu->iem.s.aMemMappings[2].fAccess),
6608 VERR_IEM_IPE_9);
6609 }
6610
6611 /*
6612 * Map the memory, checking that we can actually access it. If something
6613 * slightly complicated happens, fall back on bounce buffering.
6614 */
6615 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6616 if (rcStrict == VINF_SUCCESS)
6617 { /* likely */ }
6618 else
6619 return rcStrict;
6620
6621 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6622 { /* likely */ }
6623 else
6624 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6625
6626 /*
6627 * Alignment check.
6628 */
6629 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6630 { /* likelyish */ }
6631 else
6632 {
6633 /* Misaligned access. */
6634 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6635 {
6636 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6637 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6638 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6639 {
6640 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6641
6642 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6643 { /* likely */ }
6644 else
6645 return iemRaiseAlignmentCheckException(pVCpu);
6646 }
6647 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6648 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6649 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6650 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6651 * that's what FXSAVE does on a 10980xe. */
6652 && iemMemAreAlignmentChecksEnabled(pVCpu))
6653 return iemRaiseAlignmentCheckException(pVCpu);
6654 else
6655 return iemRaiseGeneralProtectionFault0(pVCpu);
6656 }
6657
6658#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
 6659 /* If the access is atomic there are host platform alignment restrictions
6660 we need to conform with. */
6661 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6662# if defined(RT_ARCH_AMD64)
6663 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6664# elif defined(RT_ARCH_ARM64)
6665 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6666# else
6667# error port me
6668# endif
6669 )
6670 { /* okay */ }
6671 else
6672 {
6673 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6674 pVCpu->iem.s.cMisalignedAtomics += 1;
6675 return VINF_EM_EMULATE_SPLIT_LOCK;
6676 }
6677#endif
6678 }
6679
6680#ifdef IEM_WITH_DATA_TLB
6681 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6682
6683 /*
6684 * Get the TLB entry for this page and check PT flags.
6685 *
6686 * We reload the TLB entry if we need to set the dirty bit (accessed
6687 * should in theory always be set).
6688 */
6689 uint8_t *pbMem = NULL;
6690 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6691 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6692 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
6693 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6694 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6695 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6696 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6697 {
6698# ifdef IEM_WITH_TLB_STATISTICS
6699 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
 6700# endif
6701
6702 /* If the page is either supervisor only or non-writable, we need to do
6703 more careful access checks. */
6704 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6705 {
6706 /* Write to read only memory? */
6707 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6708 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6709 && ( ( IEM_GET_CPL(pVCpu) == 3
6710 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6711 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6712 {
6713 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6714 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6715 }
6716
6717 /* Kernel memory accessed by userland? */
6718 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6719 && IEM_GET_CPL(pVCpu) == 3
6720 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6721 {
6722 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6723 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6724 }
6725 }
6726
6727 /* Look up the physical page info if necessary. */
6728 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6729# ifdef IN_RING3
6730 pbMem = pTlbe->pbMappingR3;
6731# else
6732 pbMem = NULL;
6733# endif
6734 else
6735 {
6736 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6737 { /* likely */ }
6738 else
6739 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6740 pTlbe->pbMappingR3 = NULL;
6741 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6742 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6743 &pbMem, &pTlbe->fFlagsAndPhysRev);
6744 AssertRCReturn(rc, rc);
6745# ifdef IN_RING3
6746 pTlbe->pbMappingR3 = pbMem;
6747# endif
6748 }
6749 }
6750 else
6751 {
6752 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
6753
6754 /* This page table walking will set A bits as required by the access while performing the walk.
6755 ASSUMES these are set when the address is translated rather than on commit... */
6756 /** @todo testcase: check when A bits are actually set by the CPU for code. */
6757 PGMPTWALKFAST WalkFast;
6758 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6759 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6760 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6761 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6762 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6763 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6764 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6765 fQPage |= PGMQPAGE_F_USER_MODE;
6766 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6767 if (RT_SUCCESS(rc))
6768 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6769 else
6770 {
6771 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6772# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6773 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6774 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6775# endif
6776 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6777 }
6778
6779 uint32_t fDataBps;
6780 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
6781 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
6782 {
6783 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
6784 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
6785 {
6786 pTlbe--;
6787 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
6788 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6789 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6790 }
6791 else
6792 {
6793 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
6794 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
6795 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6796 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6797 }
6798 }
6799 else
6800 {
6801 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
6802 to the page with the data access breakpoint armed on it to pass thru here. */
6803 if (fDataBps > 1)
6804 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6805 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6806 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6807 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
6808 pTlbe->uTag = uTagNoRev;
6809 }
6810 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
6811 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
6812 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6813 pTlbe->GCPhys = GCPhysPg;
6814 pTlbe->pbMappingR3 = NULL;
6815 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6816 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6817 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6818 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6819 || IEM_GET_CPL(pVCpu) != 3
6820 || (fAccess & IEM_ACCESS_WHAT_SYS));
6821
6822 /* Resolve the physical address. */
6823 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6824 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6825 &pbMem, &pTlbe->fFlagsAndPhysRev);
6826 AssertRCReturn(rc, rc);
6827# ifdef IN_RING3
6828 pTlbe->pbMappingR3 = pbMem;
6829# endif
6830 }
6831
6832 /*
6833 * Check the physical page level access and mapping.
6834 */
6835 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6836 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6837 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6838 { /* probably likely */ }
6839 else
6840 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6841 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6842 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6843 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6844 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6845 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6846
6847 if (pbMem)
6848 {
6849 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6850 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6851 fAccess |= IEM_ACCESS_NOT_LOCKED;
6852 }
6853 else
6854 {
6855 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6856 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6857 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6858 if (rcStrict != VINF_SUCCESS)
6859 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6860 }
6861
6862 void * const pvMem = pbMem;
6863
6864 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6865 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6866 if (fAccess & IEM_ACCESS_TYPE_READ)
6867 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6868
6869#else /* !IEM_WITH_DATA_TLB */
6870
6871 RTGCPHYS GCPhysFirst;
6872 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6873 if (rcStrict != VINF_SUCCESS)
6874 return rcStrict;
6875
6876 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6877 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6878 if (fAccess & IEM_ACCESS_TYPE_READ)
6879 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6880
6881 void *pvMem;
6882 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6883 if (rcStrict != VINF_SUCCESS)
6884 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6885
6886#endif /* !IEM_WITH_DATA_TLB */
6887
6888 /*
6889 * Fill in the mapping table entry.
6890 */
6891 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6892 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6893 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6894 pVCpu->iem.s.cActiveMappings += 1;
6895
6896 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6897 *ppvMem = pvMem;
6898 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6899 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6900 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6901
6902 return VINF_SUCCESS;
6903}
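/*
 * Illustrative sketch (added, not part of the original source): the typical
 * map/modify/commit pattern for a 4 byte guest data write through iemMemMap.
 * The helper name and the DS-relative store are made up for the example; an
 * alignment control of 3 is just the alignment mask, so with no
 * IEM_MEMMAP_F_ALIGN_* flags a misaligned address at most raises #AC when
 * alignment checks are enabled.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC iemMemExampleStoreDataU32(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue)
{
    void    *pvMem;
    uint8_t  bUnmapInfo;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem, &bUnmapInfo, sizeof(uint32_t), X86_SREG_DS, GCPtrMem,
                                      IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA, sizeof(uint32_t) - 1);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *(uint32_t *)pvMem = uValue;                    /* modify the directly mapped or bounce buffered bytes */
    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo); /* write back bounce buffers and/or release the page lock */
}
#endif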
6904
6905
6906/**
6907 * Commits the guest memory if bounce buffered and unmaps it.
6908 *
6909 * @returns Strict VBox status code.
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 * @param bUnmapInfo Unmap info set by iemMemMap.
6912 */
6913VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6914{
6915 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6916 AssertMsgReturn( (bUnmapInfo & 0x08)
6917 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6918 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6919 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6920 VERR_NOT_FOUND);
6921
6922 /* If it's bounce buffered, we may need to write back the buffer. */
6923 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6924 {
6925 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6926 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6927 }
6928 /* Otherwise unlock it. */
6929 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6930 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6931
6932 /* Free the entry. */
6933 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6934 Assert(pVCpu->iem.s.cActiveMappings != 0);
6935 pVCpu->iem.s.cActiveMappings--;
6936 return VINF_SUCCESS;
6937}
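/*
 * Layout note (added for clarity, not part of the original source): the
 * bUnmapInfo byte produced by iemMemMap packs
 *      bits 2:0  the mapping index (aMemMappings has fewer than 8 entries),
 *      bit  3    a validity marker (the 0x08 checked above),
 *      bits 7:4  fAccess & IEM_ACCESS_TYPE_MASK,
 * which is exactly what the AssertMsgReturn above decodes and cross-checks
 * against aMemMappings[iMemMap].fAccess.
 */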
6938
6939
6940/**
6941 * Rolls back the guest memory (conceptually only) and unmaps it.
6942 *
6943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6944 * @param bUnmapInfo Unmap info set by iemMemMap.
6945 */
6946void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6947{
6948 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6949 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6950 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6951 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6952 == ((unsigned)bUnmapInfo >> 4),
6953 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6954
6955 /* Unlock it if necessary. */
6956 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6957 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6958
6959 /* Free the entry. */
6960 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6961 Assert(pVCpu->iem.s.cActiveMappings != 0);
6962 pVCpu->iem.s.cActiveMappings--;
6963}
6964
6965#ifdef IEM_WITH_SETJMP
6966
6967/**
6968 * Maps the specified guest memory for the given kind of access, longjmp on
6969 * error.
6970 *
6971 * This may be using bounce buffering of the memory if it's crossing a page
6972 * boundary or if there is an access handler installed for any of it. Because
6973 * of lock prefix guarantees, we're in for some extra clutter when this
6974 * happens.
6975 *
6976 * This may raise a \#GP, \#SS, \#PF or \#AC.
6977 *
6978 * @returns Pointer to the mapped memory.
6979 *
6980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6981 * @param bUnmapInfo Where to return unmap info to be passed to
6982 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6983 * iemMemCommitAndUnmapWoSafeJmp,
6984 * iemMemCommitAndUnmapRoSafeJmp,
6985 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6986 * when done.
6987 * @param cbMem The number of bytes to map. This is usually 1,
6988 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6989 * string operations it can be up to a page.
6990 * @param iSegReg The index of the segment register to use for
6991 * this access. The base and limits are checked.
6992 * Use UINT8_MAX to indicate that no segmentation
6993 * is required (for IDT, GDT and LDT accesses).
6994 * @param GCPtrMem The address of the guest memory.
6995 * @param fAccess How the memory is being accessed. The
6996 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6997 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6998 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6999 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
7000 * set.
7001 * @param uAlignCtl Alignment control:
7002 * - Bits 15:0 is the alignment mask.
7003 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
7004 * IEM_MEMMAP_F_ALIGN_SSE, and
7005 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
7006 * Pass zero to skip alignment.
 7007 * @tparam a_fSafeCall Whether this is a call from a "safe" fallback function in
7008 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
7009 * needs counting as such in the statistics.
7010 */
7011template<bool a_fSafeCall = false>
7012static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7013 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7014{
7015 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
7016
7017 /*
7018 * Check the input, check segment access and adjust address
7019 * with segment base.
7020 */
7021 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7022 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
7023 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7024
7025 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7026 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7027 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7028
7029 /*
7030 * Alignment check.
7031 */
7032 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
7033 { /* likelyish */ }
7034 else
7035 {
7036 /* Misaligned access. */
7037 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7038 {
7039 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
7040 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
7041 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
7042 {
7043 AssertCompile(X86_CR0_AM == X86_EFL_AC);
7044
7045 if (iemMemAreAlignmentChecksEnabled(pVCpu))
7046 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7047 }
7048 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
7049 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
7050 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
7051 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
7052 * that's what FXSAVE does on a 10980xe. */
7053 && iemMemAreAlignmentChecksEnabled(pVCpu))
7054 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7055 else
7056 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
7057 }
7058
7059#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
 7060 /* If the access is atomic there are host platform alignment restrictions
7061 we need to conform with. */
7062 if ( !(fAccess & IEM_ACCESS_ATOMIC)
7063# if defined(RT_ARCH_AMD64)
7064 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
7065# elif defined(RT_ARCH_ARM64)
7066 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
7067# else
7068# error port me
7069# endif
7070 )
7071 { /* okay */ }
7072 else
7073 {
7074 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
7075 pVCpu->iem.s.cMisalignedAtomics += 1;
7076 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
7077 }
7078#endif
7079 }
7080
7081 /*
7082 * Figure out which mapping entry to use.
7083 */
7084 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
7085 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7086 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7087 {
7088 iMemMap = iemMemMapFindFree(pVCpu);
7089 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
7090 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
7091 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
7092 pVCpu->iem.s.aMemMappings[2].fAccess),
7093 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
7094 }
7095
7096 /*
7097 * Crossing a page boundary?
7098 */
7099 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
7100 { /* No (likely). */ }
7101 else
7102 {
7103 void *pvMem;
7104 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
7105 if (rcStrict == VINF_SUCCESS)
7106 return pvMem;
7107 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7108 }
7109
7110#ifdef IEM_WITH_DATA_TLB
7111 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
7112
7113 /*
 7114 * Get the TLB entry for this page, checking that it has the A & D bits
7115 * set as per fAccess flags.
7116 */
7117 /** @todo make the caller pass these in with fAccess. */
7118 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
7119 ? IEMTLBE_F_PT_NO_USER : 0;
7120 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
7121 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
7122 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
7123 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7124 ? IEMTLBE_F_PT_NO_WRITE : 0)
7125 : 0;
7126 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
7127 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
7128 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
7129 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
7130 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
7131 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
7132 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
7133 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
7134 {
7135# ifdef IEM_WITH_TLB_STATISTICS
7136 if (a_fSafeCall)
7137 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
7138 else
7139 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7140# endif
7141 }
7142 else
7143 {
7144 if (a_fSafeCall)
7145 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
7146 else
7147 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7148
7149 /* This page table walking will set A and D bits as required by the
7150 access while performing the walk.
7151 ASSUMES these are set when the address is translated rather than on commit... */
7152 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7153 PGMPTWALKFAST WalkFast;
7154 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7155 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7156 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7157 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7158 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7159 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7160 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7161 fQPage |= PGMQPAGE_F_USER_MODE;
7162 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7163 if (RT_SUCCESS(rc))
7164 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7165 else
7166 {
7167 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7168# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7169 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
 7170 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7171# endif
7172 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7173 }
7174
7175 uint32_t fDataBps;
7176 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7177 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7178 {
7179 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7180 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7181 {
7182 pTlbe--;
7183 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7184 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7185 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7186 }
7187 else
7188 {
7189 if (a_fSafeCall)
7190 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7191 else
7192 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7193 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7194 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7195 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7196 }
7197 }
7198 else
7199 {
7200 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7201 to the page with the data access breakpoint armed on it to pass thru here. */
7202 if (fDataBps > 1)
7203 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7204 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7205 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7206 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7207 pTlbe->uTag = uTagNoRev;
7208 }
7209 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7210 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7211 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7212 pTlbe->GCPhys = GCPhysPg;
7213 pTlbe->pbMappingR3 = NULL;
7214 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7215 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
7216 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7217
7218 /* Resolve the physical address. */
7219 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7220 uint8_t *pbMemFullLoad = NULL;
7221 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7222 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7223 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7224# ifdef IN_RING3
7225 pTlbe->pbMappingR3 = pbMemFullLoad;
7226# endif
7227 }
7228
7229 /*
7230 * Check the flags and physical revision.
7231 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7232 * just to keep the code structure simple (i.e. avoid gotos or similar).
7233 */
7234 uint8_t *pbMem;
7235 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7236 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7237# ifdef IN_RING3
7238 pbMem = pTlbe->pbMappingR3;
7239# else
7240 pbMem = NULL;
7241# endif
7242 else
7243 {
7244 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7245
7246 /*
7247 * Okay, something isn't quite right or needs refreshing.
7248 */
7249 /* Write to read only memory? */
7250 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7251 {
7252 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7253# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7254/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
7255 * to trigger an \#PG or a VM nested paging exit here yet! */
7256 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7257 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7258# endif
7259 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7260 }
7261
7262 /* Kernel memory accessed by userland? */
7263 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7264 {
7265 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7266# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7267/** @todo TLB: See above. */
7268 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7269 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7270# endif
7271 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7272 }
7273
7274 /*
7275 * Check if the physical page info needs updating.
7276 */
7277 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7278# ifdef IN_RING3
7279 pbMem = pTlbe->pbMappingR3;
7280# else
7281 pbMem = NULL;
7282# endif
7283 else
7284 {
7285 pTlbe->pbMappingR3 = NULL;
7286 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7287 pbMem = NULL;
7288 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7289 &pbMem, &pTlbe->fFlagsAndPhysRev);
7290 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7291# ifdef IN_RING3
7292 pTlbe->pbMappingR3 = pbMem;
7293# endif
7294 }
7295
7296 /*
7297 * Check the physical page level access and mapping.
7298 */
7299 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7300 { /* probably likely */ }
7301 else
7302 {
7303 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7304 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7305 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7306 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7307 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7308 if (rcStrict == VINF_SUCCESS)
7309 return pbMem;
7310 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7311 }
7312 }
7313 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7314
7315 if (pbMem)
7316 {
7317 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7318 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7319 fAccess |= IEM_ACCESS_NOT_LOCKED;
7320 }
7321 else
7322 {
7323 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7324 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7325 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7326 if (rcStrict == VINF_SUCCESS)
7327 {
7328 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7329 return pbMem;
7330 }
7331 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7332 }
7333
7334 void * const pvMem = pbMem;
7335
7336 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7337 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7338 if (fAccess & IEM_ACCESS_TYPE_READ)
7339 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7340
7341#else /* !IEM_WITH_DATA_TLB */
7342
7343
7344 RTGCPHYS GCPhysFirst;
7345 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7346 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7347 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7348
7349 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7350 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7351 if (fAccess & IEM_ACCESS_TYPE_READ)
7352 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7353
7354 void *pvMem;
7355 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7356 if (rcStrict == VINF_SUCCESS)
7357 { /* likely */ }
7358 else
7359 {
7360 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7361 if (rcStrict == VINF_SUCCESS)
7362 return pvMem;
7363 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7364 }
7365
7366#endif /* !IEM_WITH_DATA_TLB */
7367
7368 /*
7369 * Fill in the mapping table entry.
7370 */
7371 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7372 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7373 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7374 pVCpu->iem.s.cActiveMappings++;
7375
7376 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7377
7378 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7379 return pvMem;
7380}
7381
7382
7383/** @see iemMemMapJmp */
7384static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7385 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7386{
7387 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7388}
7389
7390
7391/**
7392 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7393 *
7394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7395 * @param pvMem The mapping.
7396 * @param fAccess The kind of access.
7397 */
7398void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7399{
7400 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7401 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7402 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7403 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7404 == ((unsigned)bUnmapInfo >> 4),
7405 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7406
7407 /* If it's bounce buffered, we may need to write back the buffer. */
7408 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7409 {
7410 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7411 {
7412 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7413 if (rcStrict == VINF_SUCCESS)
7414 return;
7415 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7416 }
7417 }
7418 /* Otherwise unlock it. */
7419 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7420 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7421
7422 /* Free the entry. */
7423 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7424 Assert(pVCpu->iem.s.cActiveMappings != 0);
7425 pVCpu->iem.s.cActiveMappings--;
7426}
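
/*
 * Note on the bUnmapInfo encoding (illustrative summary, derived from the
 * packing done in iemMemMapJmp above and the assertion in
 * iemMemCommitAndUnmapJmp):
 *
 *      bits 0-2 : iMemMap, the index into aMemMappings/aMemMappingLocks,
 *      bit  3   : always set, distinguishing a valid value from zero,
 *      bits 4-7 : the IEM_ACCESS_TYPE_MASK portion of fAccess, shifted by 4.
 *
 * So, assuming iMemMap = 1 and (fAccess & IEM_ACCESS_TYPE_MASK) = 3 for a
 * read+write mapping, the encoded byte would be 1 | 0x08 | (3 << 4) = 0x39,
 * and the AssertMsgReturnVoid above recovers exactly those three fields.
 */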
7427
7428
7429/** Fallback for iemMemCommitAndUnmapRwJmp. */
7430void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7431{
7432 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7433 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7434}
7435
7436
7437/** Fallback for iemMemCommitAndUnmapAtJmp. */
7438void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7439{
7440 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7441 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7442}
7443
7444
7445/** Fallback for iemMemCommitAndUnmapWoJmp. */
7446void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7447{
7448 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7449 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7450}
7451
7452
7453/** Fallback for iemMemCommitAndUnmapRoJmp. */
7454void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7455{
7456 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7457 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7458}
7459
7460
7461/** Fallback for iemMemRollbackAndUnmapWo. */
7462void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7463{
7464 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7465 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7466}
7467
7468#endif /* IEM_WITH_SETJMP */
7469
7470#ifndef IN_RING3
7471/**
7472 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7473 * buffer part shows trouble, the commit is postponed to ring-3 (sets FF and stuff).
7474 *
7475 * Allows the instruction to be completed and retired, while the IEM user will
7476 * return to ring-3 immediately afterwards and do the postponed writes there.
7477 *
7478 * @returns VBox status code (no strict statuses). Caller must check
7479 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7481 * @param bUnmapInfo Unmap info returned by iemMemMap or iemMemMapJmp.
7483 */
7484VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7485{
7486 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7487 AssertMsgReturn( (bUnmapInfo & 0x08)
7488 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7489 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7490 == ((unsigned)bUnmapInfo >> 4),
7491 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7492 VERR_NOT_FOUND);
7493
7494 /* If it's bounce buffered, we may need to write back the buffer. */
7495 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7496 {
7497 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7498 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7499 }
7500 /* Otherwise unlock it. */
7501 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7502 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7503
7504 /* Free the entry. */
7505 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7506 Assert(pVCpu->iem.s.cActiveMappings != 0);
7507 pVCpu->iem.s.cActiveMappings--;
7508 return VINF_SUCCESS;
7509}
7510#endif
7511
7512
7513/**
7514 * Rolls back mappings, releasing page locks and such.
7515 *
7516 * The caller shall only call this after checking cActiveMappings.
7517 *
7518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7519 */
7520void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7521{
7522 Assert(pVCpu->iem.s.cActiveMappings > 0);
7523
7524 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7525 while (iMemMap-- > 0)
7526 {
7527 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7528 if (fAccess != IEM_ACCESS_INVALID)
7529 {
7530 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7531 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7532 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7533 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7534 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7535 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7536 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7537 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7538 pVCpu->iem.s.cActiveMappings--;
7539 }
7540 }
7541}
7542
7543
7544/*
7545 * Instantiate R/W templates.
7546 */
7547#define TMPL_MEM_WITH_STACK
7548
7549#define TMPL_MEM_TYPE uint8_t
7550#define TMPL_MEM_FN_SUFF U8
7551#define TMPL_MEM_FMT_TYPE "%#04x"
7552#define TMPL_MEM_FMT_DESC "byte"
7553#include "IEMAllMemRWTmpl.cpp.h"
7554
7555#define TMPL_MEM_TYPE uint16_t
7556#define TMPL_MEM_FN_SUFF U16
7557#define TMPL_MEM_FMT_TYPE "%#06x"
7558#define TMPL_MEM_FMT_DESC "word"
7559#include "IEMAllMemRWTmpl.cpp.h"
7560
7561#define TMPL_WITH_PUSH_SREG
7562#define TMPL_MEM_TYPE uint32_t
7563#define TMPL_MEM_FN_SUFF U32
7564#define TMPL_MEM_FMT_TYPE "%#010x"
7565#define TMPL_MEM_FMT_DESC "dword"
7566#include "IEMAllMemRWTmpl.cpp.h"
7567#undef TMPL_WITH_PUSH_SREG
7568
7569#define TMPL_MEM_TYPE uint64_t
7570#define TMPL_MEM_FN_SUFF U64
7571#define TMPL_MEM_FMT_TYPE "%#018RX64"
7572#define TMPL_MEM_FMT_DESC "qword"
7573#include "IEMAllMemRWTmpl.cpp.h"
7574
7575#undef TMPL_MEM_WITH_STACK
7576
7577#define TMPL_MEM_TYPE uint32_t
7578#define TMPL_MEM_TYPE_ALIGN 0
7579#define TMPL_MEM_FN_SUFF U32NoAc
7580#define TMPL_MEM_FMT_TYPE "%#010x"
7581#define TMPL_MEM_FMT_DESC "dword"
7582#include "IEMAllMemRWTmpl.cpp.h"
7583#undef TMPL_WITH_PUSH_SREG
7584
7585#define TMPL_MEM_TYPE uint64_t
7586#define TMPL_MEM_TYPE_ALIGN 0
7587#define TMPL_MEM_FN_SUFF U64NoAc
7588#define TMPL_MEM_FMT_TYPE "%#018RX64"
7589#define TMPL_MEM_FMT_DESC "qword"
7590#include "IEMAllMemRWTmpl.cpp.h"
7591
7592#define TMPL_MEM_TYPE uint64_t
7593#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7594#define TMPL_MEM_FN_SUFF U64AlignedU128
7595#define TMPL_MEM_FMT_TYPE "%#018RX64"
7596#define TMPL_MEM_FMT_DESC "qword"
7597#include "IEMAllMemRWTmpl.cpp.h"
7598
7599/* See IEMAllMemRWTmplInline.cpp.h */
7600#define TMPL_MEM_BY_REF
7601
7602#define TMPL_MEM_TYPE RTFLOAT80U
7603#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7604#define TMPL_MEM_FN_SUFF R80
7605#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7606#define TMPL_MEM_FMT_DESC "tword"
7607#include "IEMAllMemRWTmpl.cpp.h"
7608
7609#define TMPL_MEM_TYPE RTPBCD80U
7610#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7611#define TMPL_MEM_FN_SUFF D80
7612#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7613#define TMPL_MEM_FMT_DESC "tword"
7614#include "IEMAllMemRWTmpl.cpp.h"
7615
7616#define TMPL_MEM_TYPE RTUINT128U
7617#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7618#define TMPL_MEM_FN_SUFF U128
7619#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7620#define TMPL_MEM_FMT_DESC "dqword"
7621#include "IEMAllMemRWTmpl.cpp.h"
7622
7623#define TMPL_MEM_TYPE RTUINT128U
7624#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7625#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7626#define TMPL_MEM_FN_SUFF U128AlignedSse
7627#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7628#define TMPL_MEM_FMT_DESC "dqword"
7629#include "IEMAllMemRWTmpl.cpp.h"
7630
7631#define TMPL_MEM_TYPE RTUINT128U
7632#define TMPL_MEM_TYPE_ALIGN 0
7633#define TMPL_MEM_FN_SUFF U128NoAc
7634#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7635#define TMPL_MEM_FMT_DESC "dqword"
7636#include "IEMAllMemRWTmpl.cpp.h"
7637
7638#define TMPL_MEM_TYPE RTUINT256U
7639#define TMPL_MEM_TYPE_ALIGN 0
7640#define TMPL_MEM_FN_SUFF U256NoAc
7641#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7642#define TMPL_MEM_FMT_DESC "qqword"
7643#include "IEMAllMemRWTmpl.cpp.h"
7644
7645#define TMPL_MEM_TYPE RTUINT256U
7646#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7647#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7648#define TMPL_MEM_FN_SUFF U256AlignedAvx
7649#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7650#define TMPL_MEM_FMT_DESC "qqword"
7651#include "IEMAllMemRWTmpl.cpp.h"
7652
7653/**
7654 * Fetches a data dword and zero extends it to a qword.
7655 *
7656 * @returns Strict VBox status code.
7657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7658 * @param pu64Dst Where to return the qword.
7659 * @param iSegReg The index of the segment register to use for
7660 * this access. The base and limits are checked.
7661 * @param GCPtrMem The address of the guest memory.
7662 */
7663VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7664{
7665 /* The lazy approach for now... */
7666 uint8_t bUnmapInfo;
7667 uint32_t const *pu32Src;
7668 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7669 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7670 if (rc == VINF_SUCCESS)
7671 {
7672 *pu64Dst = *pu32Src;
7673 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7674 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7675 }
7676 return rc;
7677}
7678
7679
7680#ifdef SOME_UNUSED_FUNCTION
7681/**
7682 * Fetches a data dword and sign extends it to a qword.
7683 *
7684 * @returns Strict VBox status code.
7685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7686 * @param pu64Dst Where to return the sign extended value.
7687 * @param iSegReg The index of the segment register to use for
7688 * this access. The base and limits are checked.
7689 * @param GCPtrMem The address of the guest memory.
7690 */
7691VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7692{
7693 /* The lazy approach for now... */
7694 uint8_t bUnmapInfo;
7695 int32_t const *pi32Src;
7696 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7697 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7698 if (rc == VINF_SUCCESS)
7699 {
7700 *pu64Dst = *pi32Src;
7701 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7702 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7703 }
7704#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7705 else
7706 *pu64Dst = 0;
7707#endif
7708 return rc;
7709}
7710#endif
7711
7712
7713/**
7714 * Fetches a descriptor register (lgdt, lidt).
7715 *
7716 * @returns Strict VBox status code.
7717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7718 * @param pcbLimit Where to return the limit.
7719 * @param pGCPtrBase Where to return the base.
7720 * @param iSegReg The index of the segment register to use for
7721 * this access. The base and limits are checked.
7722 * @param GCPtrMem The address of the guest memory.
7723 * @param enmOpSize The effective operand size.
7724 */
7725VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7726 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7727{
7728 /*
7729 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7730 * little special:
7731 * - The two reads are done separately.
7732 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7733 * - We suspect the 386 to actually commit the limit before the base in
7734 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7735 * don't try to emulate this eccentric behavior, because it's not well
7736 * enough understood and rather hard to trigger.
7737 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7738 */
7739 VBOXSTRICTRC rcStrict;
7740 if (IEM_IS_64BIT_CODE(pVCpu))
7741 {
7742 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7743 if (rcStrict == VINF_SUCCESS)
7744 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7745 }
7746 else
7747 {
7748 uint32_t uTmp = 0; /* (initialized to silence a Visual C++ 'maybe used uninitialized' warning) */
7749 if (enmOpSize == IEMMODE_32BIT)
7750 {
7751 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7752 {
7753 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7754 if (rcStrict == VINF_SUCCESS)
7755 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7756 }
7757 else
7758 {
7759 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7760 if (rcStrict == VINF_SUCCESS)
7761 {
7762 *pcbLimit = (uint16_t)uTmp;
7763 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7764 }
7765 }
7766 if (rcStrict == VINF_SUCCESS)
7767 *pGCPtrBase = uTmp;
7768 }
7769 else
7770 {
7771 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7772 if (rcStrict == VINF_SUCCESS)
7773 {
7774 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7775 if (rcStrict == VINF_SUCCESS)
7776 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7777 }
7778 }
7779 }
7780 return rcStrict;
7781}
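
/*
 * Worked example (illustrative, not part of the original file): with a
 * 16-bit operand size the base read above is masked down to 24 bits, so an
 * LGDT/LIDT operand containing limit=0x03ff and base dword=0x12345678
 * returns *pcbLimit = 0x03ff and
 * *pGCPtrBase = 0x12345678 & 0x00ffffff = 0x00345678.
 */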
7782
7783
7784/**
7785 * Stores a data dqword, SSE aligned.
7786 *
7787 * @returns Strict VBox status code.
7788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7789 * @param iSegReg The index of the segment register to use for
7790 * this access. The base and limits are checked.
7791 * @param GCPtrMem The address of the guest memory.
7792 * @param u128Value The value to store.
7793 */
7794VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7795{
7796 /* The lazy approach for now... */
7797 uint8_t bUnmapInfo;
7798 PRTUINT128U pu128Dst;
7799 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7800 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7801 if (rc == VINF_SUCCESS)
7802 {
7803 pu128Dst->au64[0] = u128Value.au64[0];
7804 pu128Dst->au64[1] = u128Value.au64[1];
7805 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7806 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7807 }
7808 return rc;
7809}
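
/*
 * Note (illustrative, an assumption about the iemMemMap alignment contract):
 * the last argument above combines the alignment mask with behaviour flags,
 * here (sizeof(RTUINT128U) - 1) = 15 ORed with IEM_MEMMAP_F_ALIGN_GP and
 * IEM_MEMMAP_F_ALIGN_SSE.  The expectation is that a GCPtrMem with
 * (GCPtrMem & 15) != 0 fails the SSE alignment requirement and raises
 * \#GP(0) instead of being mapped.
 */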
7810
7811
7812#ifdef IEM_WITH_SETJMP
7813/**
7814 * Stores a data dqword, SSE aligned.
7815 *
7816 * @returns Strict VBox status code.
7817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7818 * @param iSegReg The index of the segment register to use for
7819 * this access. The base and limits are checked.
7820 * @param GCPtrMem The address of the guest memory.
7821 * @param u128Value The value to store.
7822 */
7823void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7824 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7825{
7826 /* The lazy approach for now... */
7827 uint8_t bUnmapInfo;
7828 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7829 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7830 pu128Dst->au64[0] = u128Value.au64[0];
7831 pu128Dst->au64[1] = u128Value.au64[1];
7832 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7833 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7834}
7835#endif
7836
7837
7838/**
7839 * Stores a data qqword.
7840 *
7841 * @returns Strict VBox status code.
7842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7843 * @param iSegReg The index of the segment register to use for
7844 * this access. The base and limits are checked.
7845 * @param GCPtrMem The address of the guest memory.
7846 * @param pu256Value Pointer to the value to store.
7847 */
7848VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7849{
7850 /* The lazy approach for now... */
7851 uint8_t bUnmapInfo;
7852 PRTUINT256U pu256Dst;
7853 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7854 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7855 if (rc == VINF_SUCCESS)
7856 {
7857 pu256Dst->au64[0] = pu256Value->au64[0];
7858 pu256Dst->au64[1] = pu256Value->au64[1];
7859 pu256Dst->au64[2] = pu256Value->au64[2];
7860 pu256Dst->au64[3] = pu256Value->au64[3];
7861 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7862 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7863 }
7864 return rc;
7865}
7866
7867
7868#ifdef IEM_WITH_SETJMP
7869/**
7870 * Stores a data qqword, longjmp on error.
7871 *
7872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7873 * @param iSegReg The index of the segment register to use for
7874 * this access. The base and limits are checked.
7875 * @param GCPtrMem The address of the guest memory.
7876 * @param pu256Value Pointer to the value to store.
7877 */
7878void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7879{
7880 /* The lazy approach for now... */
7881 uint8_t bUnmapInfo;
7882 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7883 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7884 pu256Dst->au64[0] = pu256Value->au64[0];
7885 pu256Dst->au64[1] = pu256Value->au64[1];
7886 pu256Dst->au64[2] = pu256Value->au64[2];
7887 pu256Dst->au64[3] = pu256Value->au64[3];
7888 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7889 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7890}
7891#endif
7892
7893
7894/**
7895 * Stores a descriptor register (sgdt, sidt).
7896 *
7897 * @returns Strict VBox status code.
7898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7899 * @param cbLimit The limit.
7900 * @param GCPtrBase The base address.
7901 * @param iSegReg The index of the segment register to use for
7902 * this access. The base and limits are checked.
7903 * @param GCPtrMem The address of the guest memory.
7904 */
7905VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7906{
7907 /*
7908 * The SIDT and SGDT instructions actually store the data using two
7909 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7910 * do not respond to opsize prefixes.
7911 */
7912 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7913 if (rcStrict == VINF_SUCCESS)
7914 {
7915 if (IEM_IS_16BIT_CODE(pVCpu))
7916 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7917 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7918 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7919 else if (IEM_IS_32BIT_CODE(pVCpu))
7920 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7921 else
7922 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7923 }
7924 return rcStrict;
7925}
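
/*
 * Worked example (illustrative, not part of the original file): for 16-bit
 * code on a target CPU of 286 or older the stored base dword is ORed with
 * 0xff000000, so GCPtrBase = 0x00123456 is written out as 0xff123456, while
 * later target CPUs store the plain 0x00123456.
 */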
7926
7927
7928/**
7929 * Begin a special stack push (used by interrupt, exceptions and such).
7930 *
7931 * This will raise \#SS or \#PF if appropriate.
7932 *
7933 * @returns Strict VBox status code.
7934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7935 * @param cbMem The number of bytes to push onto the stack.
7936 * @param cbAlign The alignment mask (7, 3, 1).
7937 * @param ppvMem Where to return the pointer to the stack memory.
7938 * As with the other memory functions this could be
7939 * direct access or bounce buffered access, so
7940 * don't commit register until the commit call
7941 * succeeds.
7942 * @param pbUnmapInfo Where to store unmap info for
7943 * iemMemStackPushCommitSpecial.
7944 * @param puNewRsp Where to return the new RSP value. This must be
7945 * passed unchanged to
7946 * iemMemStackPushCommitSpecial().
7947 */
7948VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7949 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7950{
7951 Assert(cbMem < UINT8_MAX);
7952 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7953 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7954}
7955
7956
7957/**
7958 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7959 *
7960 * This will update the rSP.
7961 *
7962 * @returns Strict VBox status code.
7963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7964 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7965 * @param uNewRsp The new RSP value returned by
7966 * iemMemStackPushBeginSpecial().
7967 */
7968VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7969{
7970 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7971 if (rcStrict == VINF_SUCCESS)
7972 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7973 return rcStrict;
7974}
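
/*
 * Illustrative pairing sketch (assumption, not part of the original file):
 * a caller pushing a qword via the special stack API would pair the begin
 * and commit calls roughly like this.  iemExamplePushQword is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePushQword(PVMCPUCC pVCpu, uint64_t uValue)
{
    void      *pvStack    = NULL;
    uint8_t    bUnmapInfo = 0;
    uint64_t   uNewRsp    = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uValue), 7 /*cbAlign*/,
                                                        &pvStack, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *(uint64_t *)pvStack = uValue;      /* write into the direct or bounce buffered mapping */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); /* commits + updates RSP */
    }
    return rcStrict;
}
#endif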
7975
7976
7977/**
7978 * Begin a special stack pop (used by iret, retf and such).
7979 *
7980 * This will raise \#SS or \#PF if appropriate.
7981 *
7982 * @returns Strict VBox status code.
7983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7984 * @param cbMem The number of bytes to pop from the stack.
7985 * @param cbAlign The alignment mask (7, 3, 1).
7986 * @param ppvMem Where to return the pointer to the stack memory.
7987 * @param pbUnmapInfo Where to store unmap info for
7988 * iemMemStackPopDoneSpecial.
7989 * @param puNewRsp Where to return the new RSP value. This must be
7990 * assigned to CPUMCTX::rsp manually some time
7991 * after iemMemStackPopDoneSpecial() has been
7992 * called.
7993 */
7994VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7995 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7996{
7997 Assert(cbMem < UINT8_MAX);
7998 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7999 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8000}
8001
8002
8003/**
8004 * Continue a special stack pop (used by iret and retf), for the purpose of
8005 * retrieving a new stack pointer.
8006 *
8007 * This will raise \#SS or \#PF if appropriate.
8008 *
8009 * @returns Strict VBox status code.
8010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8011 * @param off Offset from the top of the stack. This is zero
8012 * except in the retf case.
8013 * @param cbMem The number of bytes to pop from the stack.
8014 * @param ppvMem Where to return the pointer to the stack memory.
8015 * @param pbUnmapInfo Where to store unmap info for
8016 * iemMemStackPopDoneSpecial.
8017 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8018 * return this because all use of this function is
8019 * to retrieve a new value and anything we return
8020 * here would be discarded.)
8021 */
8022VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8023 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
8024{
8025 Assert(cbMem < UINT8_MAX);
8026
8027 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8028 RTGCPTR GCPtrTop;
8029 if (IEM_IS_64BIT_CODE(pVCpu))
8030 GCPtrTop = uCurNewRsp;
8031 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8032 GCPtrTop = (uint32_t)uCurNewRsp;
8033 else
8034 GCPtrTop = (uint16_t)uCurNewRsp;
8035
8036 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8037 0 /* checked in iemMemStackPopBeginSpecial */);
8038}
8039
8040
8041/**
8042 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8043 * iemMemStackPopContinueSpecial).
8044 *
8045 * The caller will manually commit the rSP.
8046 *
8047 * @returns Strict VBox status code.
8048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8049 * @param bUnmapInfo Unmap information returned by
8050 * iemMemStackPopBeginSpecial() or
8051 * iemMemStackPopContinueSpecial().
8052 */
8053VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
8054{
8055 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8056}
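
/*
 * Illustrative sequence sketch (assumption, not part of the original file):
 * an iret/retf style caller popping a qword would chain the begin/done calls
 * roughly like this, committing RSP manually afterwards as documented above.
 * iemExamplePopQword is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePopQword(PVMCPUCC pVCpu, uint64_t *puValue)
{
    void const *pvStack    = NULL;
    uint8_t     bUnmapInfo = 0;
    uint64_t    uNewRsp    = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(*puValue), 7 /*cbAlign*/,
                                                       &pvStack, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *puValue = *(uint64_t const *)pvStack;
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* the caller commits RSP manually */
    }
    return rcStrict;
}
#endif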
8057
8058
8059/**
8060 * Fetches a system table byte.
8061 *
8062 * @returns Strict VBox status code.
8063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8064 * @param pbDst Where to return the byte.
8065 * @param iSegReg The index of the segment register to use for
8066 * this access. The base and limits are checked.
8067 * @param GCPtrMem The address of the guest memory.
8068 */
8069VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8070{
8071 /* The lazy approach for now... */
8072 uint8_t bUnmapInfo;
8073 uint8_t const *pbSrc;
8074 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8075 if (rc == VINF_SUCCESS)
8076 {
8077 *pbDst = *pbSrc;
8078 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8079 }
8080 return rc;
8081}
8082
8083
8084/**
8085 * Fetches a system table word.
8086 *
8087 * @returns Strict VBox status code.
8088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8089 * @param pu16Dst Where to return the word.
8090 * @param iSegReg The index of the segment register to use for
8091 * this access. The base and limits are checked.
8092 * @param GCPtrMem The address of the guest memory.
8093 */
8094VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8095{
8096 /* The lazy approach for now... */
8097 uint8_t bUnmapInfo;
8098 uint16_t const *pu16Src;
8099 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8100 if (rc == VINF_SUCCESS)
8101 {
8102 *pu16Dst = *pu16Src;
8103 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8104 }
8105 return rc;
8106}
8107
8108
8109/**
8110 * Fetches a system table dword.
8111 *
8112 * @returns Strict VBox status code.
8113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8114 * @param pu32Dst Where to return the dword.
8115 * @param iSegReg The index of the segment register to use for
8116 * this access. The base and limits are checked.
8117 * @param GCPtrMem The address of the guest memory.
8118 */
8119VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8120{
8121 /* The lazy approach for now... */
8122 uint8_t bUnmapInfo;
8123 uint32_t const *pu32Src;
8124 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8125 if (rc == VINF_SUCCESS)
8126 {
8127 *pu32Dst = *pu32Src;
8128 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8129 }
8130 return rc;
8131}
8132
8133
8134/**
8135 * Fetches a system table qword.
8136 *
8137 * @returns Strict VBox status code.
8138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8139 * @param pu64Dst Where to return the qword.
8140 * @param iSegReg The index of the segment register to use for
8141 * this access. The base and limits are checked.
8142 * @param GCPtrMem The address of the guest memory.
8143 */
8144VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8145{
8146 /* The lazy approach for now... */
8147 uint8_t bUnmapInfo;
8148 uint64_t const *pu64Src;
8149 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8150 if (rc == VINF_SUCCESS)
8151 {
8152 *pu64Dst = *pu64Src;
8153 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8154 }
8155 return rc;
8156}
8157
8158
8159/**
8160 * Fetches a descriptor table entry with caller specified error code.
8161 *
8162 * @returns Strict VBox status code.
8163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8164 * @param pDesc Where to return the descriptor table entry.
8165 * @param uSel The selector which table entry to fetch.
8166 * @param uXcpt The exception to raise on table lookup error.
8167 * @param uErrorCode The error code associated with the exception.
8168 */
8169static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8170 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8171{
8172 AssertPtr(pDesc);
8173 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8174
8175 /** @todo did the 286 require all 8 bytes to be accessible? */
8176 /*
8177 * Get the selector table base and check bounds.
8178 */
8179 RTGCPTR GCPtrBase;
8180 if (uSel & X86_SEL_LDT)
8181 {
8182 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8183 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8184 {
8185 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8186 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8187 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8188 uErrorCode, 0);
8189 }
8190
8191 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8192 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8193 }
8194 else
8195 {
8196 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8197 {
8198 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8199 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8200 uErrorCode, 0);
8201 }
8202 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8203 }
8204
8205 /*
8206 * Read the legacy descriptor and maybe the long mode extensions if
8207 * required.
8208 */
8209 VBOXSTRICTRC rcStrict;
8210 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8211 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8212 else
8213 {
8214 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8215 if (rcStrict == VINF_SUCCESS)
8216 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8217 if (rcStrict == VINF_SUCCESS)
8218 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8219 if (rcStrict == VINF_SUCCESS)
8220 pDesc->Legacy.au16[3] = 0;
8221 else
8222 return rcStrict;
8223 }
8224
8225 if (rcStrict == VINF_SUCCESS)
8226 {
8227 if ( !IEM_IS_LONG_MODE(pVCpu)
8228 || pDesc->Legacy.Gen.u1DescType)
8229 pDesc->Long.au64[1] = 0;
8230 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8231 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8232 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8233 else
8234 {
8235 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8236 /** @todo is this the right exception? */
8237 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8238 }
8239 }
8240 return rcStrict;
8241}
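
/*
 * Worked example (illustrative, not part of the original file, assuming
 * X86_SEL_RPL_LDT covers the low three RPL+TI bits, i.e. 0x7): for a GDT
 * selector uSel = 0x002b the bounds check above requires
 * (0x2b | 0x7) = 0x2f <= cbGdt, i.e. all 8 bytes of the descriptor at
 * offset (uSel & X86_SEL_MASK) = 0x28 must lie within the table, regardless
 * of the selector's RPL and TI bits.
 */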
8242
8243
8244/**
8245 * Fetches a descriptor table entry.
8246 *
8247 * @returns Strict VBox status code.
8248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8249 * @param pDesc Where to return the descriptor table entry.
8250 * @param uSel The selector which table entry to fetch.
8251 * @param uXcpt The exception to raise on table lookup error.
8252 */
8253VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8254{
8255 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8256}
8257
8258
8259/**
8260 * Marks the selector descriptor as accessed (only non-system descriptors).
8261 *
8262 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8263 * will therefore skip the limit checks.
8264 *
8265 * @returns Strict VBox status code.
8266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8267 * @param uSel The selector.
8268 */
8269VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8270{
8271 /*
8272 * Get the selector table base and calculate the entry address.
8273 */
8274 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8275 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8276 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8277 GCPtr += uSel & X86_SEL_MASK;
8278
8279 /*
8280 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8281 * ugly stuff to avoid this. This will make sure it's an atomic access
8282 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8283 */
8284 VBOXSTRICTRC rcStrict;
8285 uint8_t bUnmapInfo;
8286 uint32_t volatile *pu32;
8287 if ((GCPtr & 3) == 0)
8288 {
8289 /* The normal case, map the 32-bit dword containing the accessed bit (40). */
8290 GCPtr += 2 + 2;
8291 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8292 if (rcStrict != VINF_SUCCESS)
8293 return rcStrict;
8294 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8295 }
8296 else
8297 {
8298 /* The misaligned GDT/LDT case, map the whole thing. */
8299 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8300 if (rcStrict != VINF_SUCCESS)
8301 return rcStrict;
8302 switch ((uintptr_t)pu32 & 3)
8303 {
8304 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8305 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8306 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8307 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8308 }
8309 }
8310
8311 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8312}
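
/*
 * Worked example (illustrative, not part of the original file): the accessed
 * bit is bit 40 of the 8-byte descriptor.  In the aligned path above the
 * mapping starts at descriptor offset 4 (GCPtr += 2 + 2), so bit 40 turns
 * into bit 40 - 32 = 8 of the mapped dword, hence ASMAtomicBitSet(pu32, 8).
 * The misaligned path maps the whole descriptor and picks a byte offset so
 * that the very same bit is set from a naturally aligned base address.
 */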
8313
8314
8315#undef LOG_GROUP
8316#define LOG_GROUP LOG_GROUP_IEM
8317
8318/** @} */
8319
8320/** @name Opcode Helpers.
8321 * @{
8322 */
8323
8324/**
8325 * Calculates the effective address of a ModR/M memory operand.
8326 *
8327 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8328 *
8329 * @return Strict VBox status code.
8330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8331 * @param bRm The ModRM byte.
8332 * @param cbImmAndRspOffset - First byte: The size of any immediate
8333 * following the effective address opcode bytes
8334 * (only for RIP relative addressing).
8335 * - Second byte: RSP displacement (for POP [ESP]).
8336 * @param pGCPtrEff Where to return the effective address.
8337 */
8338VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8339{
8340 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8341# define SET_SS_DEF() \
8342 do \
8343 { \
8344 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8345 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8346 } while (0)
8347
8348 if (!IEM_IS_64BIT_CODE(pVCpu))
8349 {
8350/** @todo Check the effective address size crap! */
8351 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8352 {
8353 uint16_t u16EffAddr;
8354
8355 /* Handle the disp16 form with no registers first. */
8356 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8357 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8358 else
8359 {
8360 /* Get the displacement. */
8361 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8362 {
8363 case 0: u16EffAddr = 0; break;
8364 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8365 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8366 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8367 }
8368
8369 /* Add the base and index registers to the disp. */
8370 switch (bRm & X86_MODRM_RM_MASK)
8371 {
8372 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8373 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8374 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8375 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8376 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8377 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8378 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8379 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8380 }
8381 }
8382
8383 *pGCPtrEff = u16EffAddr;
8384 }
8385 else
8386 {
8387 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8388 uint32_t u32EffAddr;
8389
8390 /* Handle the disp32 form with no registers first. */
8391 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8392 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8393 else
8394 {
8395 /* Get the register (or SIB) value. */
8396 switch ((bRm & X86_MODRM_RM_MASK))
8397 {
8398 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8399 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8400 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8401 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8402 case 4: /* SIB */
8403 {
8404 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8405
8406 /* Get the index and scale it. */
8407 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8408 {
8409 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8410 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8411 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8412 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8413 case 4: u32EffAddr = 0; /*none */ break;
8414 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8415 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8416 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8417 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8418 }
8419 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8420
8421 /* add base */
8422 switch (bSib & X86_SIB_BASE_MASK)
8423 {
8424 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8425 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8426 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8427 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8428 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8429 case 5:
8430 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8431 {
8432 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8433 SET_SS_DEF();
8434 }
8435 else
8436 {
8437 uint32_t u32Disp;
8438 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8439 u32EffAddr += u32Disp;
8440 }
8441 break;
8442 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8443 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8444 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8445 }
8446 break;
8447 }
8448 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8449 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8450 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8451 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8452 }
8453
8454 /* Get and add the displacement. */
8455 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8456 {
8457 case 0:
8458 break;
8459 case 1:
8460 {
8461 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8462 u32EffAddr += i8Disp;
8463 break;
8464 }
8465 case 2:
8466 {
8467 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8468 u32EffAddr += u32Disp;
8469 break;
8470 }
8471 default:
8472 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8473 }
8474
8475 }
8476 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8477 *pGCPtrEff = u32EffAddr;
8478 }
8479 }
8480 else
8481 {
8482 uint64_t u64EffAddr;
8483
8484 /* Handle the rip+disp32 form with no registers first. */
8485 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8486 {
8487 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8488 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8489 }
8490 else
8491 {
8492 /* Get the register (or SIB) value. */
8493 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8494 {
8495 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8496 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8497 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8498 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8499 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8500 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8501 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8502 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8503 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8504 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8505 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8506 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8507 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8508 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8509 /* SIB */
8510 case 4:
8511 case 12:
8512 {
8513 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8514
8515 /* Get the index and scale it. */
8516 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8517 {
8518 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8519 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8520 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8521 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8522 case 4: u64EffAddr = 0; /*none */ break;
8523 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8524 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8525 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8526 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8527 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8528 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8529 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8530 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8531 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8532 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8533 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8534 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8535 }
8536 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8537
8538 /* add base */
8539 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8540 {
8541 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8542 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8543 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8544 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8545 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8546 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8547 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8548 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8549 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8550 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8551 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8552 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8553 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8554 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8555 /* complicated encodings */
8556 case 5:
8557 case 13:
8558 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8559 {
8560 if (!pVCpu->iem.s.uRexB)
8561 {
8562 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8563 SET_SS_DEF();
8564 }
8565 else
8566 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8567 }
8568 else
8569 {
8570 uint32_t u32Disp;
8571 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8572 u64EffAddr += (int32_t)u32Disp;
8573 }
8574 break;
8575 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8576 }
8577 break;
8578 }
8579 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8580 }
8581
8582 /* Get and add the displacement. */
8583 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8584 {
8585 case 0:
8586 break;
8587 case 1:
8588 {
8589 int8_t i8Disp;
8590 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8591 u64EffAddr += i8Disp;
8592 break;
8593 }
8594 case 2:
8595 {
8596 uint32_t u32Disp;
8597 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8598 u64EffAddr += (int32_t)u32Disp;
8599 break;
8600 }
8601 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8602 }
8603
8604 }
8605
8606 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8607 *pGCPtrEff = u64EffAddr;
8608 else
8609 {
8610 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8611 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8612 }
8613 }
8614
8615 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8616 return VINF_SUCCESS;
8617}
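
/*
 * Worked example (illustrative, not part of the original file): in 16-bit
 * address mode bRm = 0x46 decodes as mod=1, rm=6, i.e. [bp+disp8].  The code
 * above fetches the sign-extended disp8, adds BP and applies the SS segment
 * default via SET_SS_DEF(); with BP=0x1000 and disp8=-4 the result is
 * *pGCPtrEff = 0x0ffc.
 */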
8618
8619
8620#ifdef IEM_WITH_SETJMP
8621/**
8622 * Calculates the effective address of a ModR/M memory operand.
8623 *
8624 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8625 *
8626 * May longjmp on internal error.
8627 *
8628 * @return The effective address.
8629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8630 * @param bRm The ModRM byte.
8631 * @param cbImmAndRspOffset - First byte: The size of any immediate
8632 * following the effective address opcode bytes
8633 * (only for RIP relative addressing).
8634 * - Second byte: RSP displacement (for POP [ESP]).
8635 */
8636RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8637{
8638 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8639# define SET_SS_DEF() \
8640 do \
8641 { \
8642 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8643 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8644 } while (0)
8645
8646 if (!IEM_IS_64BIT_CODE(pVCpu))
8647 {
8648/** @todo Check the effective address size crap! */
8649 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8650 {
8651 uint16_t u16EffAddr;
8652
8653 /* Handle the disp16 form with no registers first. */
8654 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8655 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8656 else
8657 {
8658 /* Get the displacement. */
8659 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8660 {
8661 case 0: u16EffAddr = 0; break;
8662 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8663 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8664 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8665 }
8666
8667 /* Add the base and index registers to the disp. */
8668 switch (bRm & X86_MODRM_RM_MASK)
8669 {
8670 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8671 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8672 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8673 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8674 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8675 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8676 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8677 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8678 }
8679 }
8680
8681 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8682 return u16EffAddr;
8683 }
8684
8685 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8686 uint32_t u32EffAddr;
8687
8688 /* Handle the disp32 form with no registers first. */
8689 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8690 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8691 else
8692 {
8693 /* Get the register (or SIB) value. */
8694 switch ((bRm & X86_MODRM_RM_MASK))
8695 {
8696 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8697 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8698 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8699 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8700 case 4: /* SIB */
8701 {
8702 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8703
8704 /* Get the index and scale it. */
8705 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8706 {
8707 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8708 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8709 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8710 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8711 case 4: u32EffAddr = 0; /*none */ break;
8712 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8713 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8714 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8715 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8716 }
8717 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8718
8719 /* add base */
8720 switch (bSib & X86_SIB_BASE_MASK)
8721 {
8722 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8723 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8724 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8725 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8726 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8727 case 5:
8728 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8729 {
8730 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8731 SET_SS_DEF();
8732 }
8733 else
8734 {
8735 uint32_t u32Disp;
8736 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8737 u32EffAddr += u32Disp;
8738 }
8739 break;
8740 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8741 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8742 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8743 }
8744 break;
8745 }
8746 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8747 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8748 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8749 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8750 }
8751
8752 /* Get and add the displacement. */
8753 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8754 {
8755 case 0:
8756 break;
8757 case 1:
8758 {
8759 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8760 u32EffAddr += i8Disp;
8761 break;
8762 }
8763 case 2:
8764 {
8765 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8766 u32EffAddr += u32Disp;
8767 break;
8768 }
8769 default:
8770 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8771 }
8772 }
8773
8774 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8775 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8776 return u32EffAddr;
8777 }
8778
8779 uint64_t u64EffAddr;
8780
8781 /* Handle the rip+disp32 form with no registers first. */
8782 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8783 {
8784 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8785 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8786 }
8787 else
8788 {
8789 /* Get the register (or SIB) value. */
8790 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8791 {
8792 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8793 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8794 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8795 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8796 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8797 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8798 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8799 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8800 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8801 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8802 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8803 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8804 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8805 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8806 /* SIB */
8807 case 4:
8808 case 12:
8809 {
8810 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8811
8812 /* Get the index and scale it. */
8813 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8814 {
8815 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8816 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8817 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8818 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8819 case 4: u64EffAddr = 0; /*none */ break;
8820 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8821 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8822 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8823 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8824 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8825 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8826 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8827 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8828 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8829 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8830 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8831 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8832 }
8833 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8834
8835 /* add base */
8836 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8837 {
8838 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8839 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8840 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8841 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8842 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8843 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8844 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8845 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8846 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8847 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8848 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8849 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8850 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8851 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8852 /* complicated encodings */
8853 case 5:
8854 case 13:
8855 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8856 {
8857 if (!pVCpu->iem.s.uRexB)
8858 {
8859 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8860 SET_SS_DEF();
8861 }
8862 else
8863 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8864 }
8865 else
8866 {
8867 uint32_t u32Disp;
8868 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8869 u64EffAddr += (int32_t)u32Disp;
8870 }
8871 break;
8872 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8873 }
8874 break;
8875 }
8876 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8877 }
8878
8879 /* Get and add the displacement. */
8880 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8881 {
8882 case 0:
8883 break;
8884 case 1:
8885 {
8886 int8_t i8Disp;
8887 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8888 u64EffAddr += i8Disp;
8889 break;
8890 }
8891 case 2:
8892 {
8893 uint32_t u32Disp;
8894 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8895 u64EffAddr += (int32_t)u32Disp;
8896 break;
8897 }
8898 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8899 }
8900
8901 }
8902
8903 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8904 {
8905 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8906 return u64EffAddr;
8907 }
8908 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8909 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8910 return u64EffAddr & UINT32_MAX;
8911}
8912#endif /* IEM_WITH_SETJMP */
8913
8914
8915/**
8916 * Calculates the effective address of a ModR/M memory operand, extended version
8917 * for use in the recompilers.
8918 *
8919 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8920 *
8921 * @return Strict VBox status code.
8922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8923 * @param bRm The ModRM byte.
8924 * @param cbImmAndRspOffset - First byte: The size of any immediate
8925 * following the effective address opcode bytes
8926 * (only for RIP relative addressing).
8927 * - Second byte: RSP displacement (for POP [ESP]).
8928 * @param pGCPtrEff Where to return the effective address.
8929 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8930 * SIB byte (bits 39:32).
8931 */
8932VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8933{
8934 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8935# define SET_SS_DEF() \
8936 do \
8937 { \
8938 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8939 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8940 } while (0)
8941
8942 uint64_t uInfo;
8943 if (!IEM_IS_64BIT_CODE(pVCpu))
8944 {
8945/** @todo Check the effective address size crap! */
8946 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8947 {
8948 uint16_t u16EffAddr;
8949
8950 /* Handle the disp16 form with no registers first. */
8951 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8952 {
8953 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8954 uInfo = u16EffAddr;
8955 }
8956 else
8957 {
8958 /* Get the displacement. */
8959 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8960 {
8961 case 0: u16EffAddr = 0; break;
8962 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8963 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8964 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8965 }
8966 uInfo = u16EffAddr;
8967
8968 /* Add the base and index registers to the disp. */
8969 switch (bRm & X86_MODRM_RM_MASK)
8970 {
8971 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8972 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8973 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8974 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8975 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8976 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8977 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8978 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8979 }
8980 }
8981
8982 *pGCPtrEff = u16EffAddr;
8983 }
8984 else
8985 {
8986 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8987 uint32_t u32EffAddr;
8988
8989 /* Handle the disp32 form with no registers first. */
8990 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8991 {
8992 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8993 uInfo = u32EffAddr;
8994 }
8995 else
8996 {
8997 /* Get the register (or SIB) value. */
8998 uInfo = 0;
8999 switch ((bRm & X86_MODRM_RM_MASK))
9000 {
9001 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9002 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9003 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9004 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9005 case 4: /* SIB */
9006 {
9007 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9008 uInfo = (uint64_t)bSib << 32;
9009
9010 /* Get the index and scale it. */
9011 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9012 {
9013 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9014 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9015 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9016 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9017 case 4: u32EffAddr = 0; /*none */ break;
9018 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9019 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9020 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9021 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9022 }
9023 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9024
9025 /* add base */
9026 switch (bSib & X86_SIB_BASE_MASK)
9027 {
9028 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9029 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9030 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9031 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9032 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9033 case 5:
9034 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9035 {
9036 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9037 SET_SS_DEF();
9038 }
9039 else
9040 {
9041 uint32_t u32Disp;
9042 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9043 u32EffAddr += u32Disp;
9044 uInfo |= u32Disp;
9045 }
9046 break;
9047 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9048 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9049 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9050 }
9051 break;
9052 }
9053 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9054 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9055 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9056 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9057 }
9058
9059 /* Get and add the displacement. */
9060 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9061 {
9062 case 0:
9063 break;
9064 case 1:
9065 {
9066 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9067 u32EffAddr += i8Disp;
9068 uInfo |= (uint32_t)(int32_t)i8Disp;
9069 break;
9070 }
9071 case 2:
9072 {
9073 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9074 u32EffAddr += u32Disp;
9075 uInfo |= (uint32_t)u32Disp;
9076 break;
9077 }
9078 default:
9079 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9080 }
9081
9082 }
9083 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9084 *pGCPtrEff = u32EffAddr;
9085 }
9086 }
9087 else
9088 {
9089 uint64_t u64EffAddr;
9090
9091 /* Handle the rip+disp32 form with no registers first. */
9092 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9093 {
9094 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9095 uInfo = (uint32_t)u64EffAddr;
9096 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9097 }
9098 else
9099 {
9100 /* Get the register (or SIB) value. */
9101 uInfo = 0;
9102 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9103 {
9104 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9105 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9106 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9107 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9108 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9109 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9110 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9111 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9112 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9113 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9114 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9115 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9116 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9117 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9118 /* SIB */
9119 case 4:
9120 case 12:
9121 {
9122 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9123 uInfo = (uint64_t)bSib << 32;
9124
9125 /* Get the index and scale it. */
9126 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9127 {
9128 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9129 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9130 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9131 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9132 case 4: u64EffAddr = 0; /*none */ break;
9133 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9134 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9135 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9136 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9137 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9138 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9139 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9140 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9141 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9142 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9143 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9144 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9145 }
9146 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9147
9148 /* add base */
9149 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9150 {
9151 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9152 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9153 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9154 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9155 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9156 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9157 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9158 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9159 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9160 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9161 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9162 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9163 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9164 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9165 /* complicated encodings */
9166 case 5:
9167 case 13:
9168 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9169 {
9170 if (!pVCpu->iem.s.uRexB)
9171 {
9172 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9173 SET_SS_DEF();
9174 }
9175 else
9176 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9177 }
9178 else
9179 {
9180 uint32_t u32Disp;
9181 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9182 u64EffAddr += (int32_t)u32Disp;
9183 uInfo |= u32Disp;
9184 }
9185 break;
9186 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9187 }
9188 break;
9189 }
9190 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9191 }
9192
9193 /* Get and add the displacement. */
9194 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9195 {
9196 case 0:
9197 break;
9198 case 1:
9199 {
9200 int8_t i8Disp;
9201 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9202 u64EffAddr += i8Disp;
9203 uInfo |= (uint32_t)(int32_t)i8Disp;
9204 break;
9205 }
9206 case 2:
9207 {
9208 uint32_t u32Disp;
9209 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9210 u64EffAddr += (int32_t)u32Disp;
9211 uInfo |= u32Disp;
9212 break;
9213 }
9214 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9215 }
9216
9217 }
9218
9219 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9220 *pGCPtrEff = u64EffAddr;
9221 else
9222 {
9223 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9224 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9225 }
9226 }
9227 *puInfo = uInfo;
9228
9229 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9230 return VINF_SUCCESS;
9231}
9232
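/**
 * Illustrative sketch: shows how a hypothetical caller might pack the
 * cbImmAndRspOffset argument and decode the uInfo value produced by
 * iemOpHlpCalcRmEffAddrEx above.  The wrapper function and local names are
 * invented for the example; only the byte/bit layouts come from the function
 * documentation.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, uint8_t offRspDisp)
{
    /* First byte: size of any immediate following the effective address bytes
       (only relevant for RIP relative addressing); second byte: RSP
       displacement (for POP [ESP]). */
    uint32_t const cbImmAndRspOffset = (uint32_t)cbImm | ((uint32_t)offRspDisp << 8);

    RTGCPTR      GCPtrEff = 0;
    uint64_t     uInfo    = 0;
    VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
    if (rcStrict == VINF_SUCCESS)
    {
        uint32_t const u32Disp = (uint32_t)uInfo;         /* bits 31:0:  the 32-bit displacement (if any). */
        uint8_t  const bSib    = (uint8_t)(uInfo >> 32);  /* bits 39:32: the SIB byte (if any). */
        Log5(("iemExampleCalcRmEffAddrEx: GCPtrEff=%RGv disp=%#x sib=%#x\n", GCPtrEff, u32Disp, bSib));
    }
    return rcStrict;
}
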
9233/** @} */
9234
9235
9236#ifdef LOG_ENABLED
9237/**
9238 * Logs the current instruction.
9239 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9240 * @param fSameCtx Set if we have the same context information as the VMM,
9241 * clear if we may have already executed an instruction in
9242 * our debug context. When clear, we assume IEMCPU holds
9243 * valid CPU mode info.
9244 *
9245 * The @a fSameCtx parameter is now misleading and obsolete.
9246 * @param pszFunction The IEM function doing the execution.
9247 */
9248static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9249{
9250# ifdef IN_RING3
9251 if (LogIs2Enabled())
9252 {
9253 char szInstr[256];
9254 uint32_t cbInstr = 0;
9255 if (fSameCtx)
9256 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9257 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9258 szInstr, sizeof(szInstr), &cbInstr);
9259 else
9260 {
9261 uint32_t fFlags = 0;
9262 switch (IEM_GET_CPU_MODE(pVCpu))
9263 {
9264 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9265 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9266 case IEMMODE_16BIT:
9267 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9268 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9269 else
9270 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9271 break;
9272 }
9273 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9274 szInstr, sizeof(szInstr), &cbInstr);
9275 }
9276
9277 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9278 Log2(("**** %s fExec=%x\n"
9279 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9280 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9281 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9282 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9283 " %s\n"
9284 , pszFunction, pVCpu->iem.s.fExec,
9285 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9286 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9287 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9288 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9289 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9290 szInstr));
9291
9292 /* This stuff sucks atm. as it fills the log with MSRs. */
9293 //if (LogIs3Enabled())
9294 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9295 }
9296 else
9297# endif
9298 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9299 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9300 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9301}
9302#endif /* LOG_ENABLED */
9303
9304
9305#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9306/**
9307 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9308 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9309 *
9310 * @returns Modified rcStrict.
9311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9312 * @param rcStrict The instruction execution status.
9313 */
9314static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9315{
9316 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9317 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9318 {
9319 /* VMX preemption timer takes priority over NMI-window exits. */
9320 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9321 {
9322 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9323 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9324 }
9325 /*
9326 * Check remaining intercepts.
9327 *
9328 * NMI-window and Interrupt-window VM-exits.
9329 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9330 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9331 *
9332 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9333 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9334 */
9335 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9336 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9337 && !TRPMHasTrap(pVCpu))
9338 {
9339 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9340 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9341 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9342 {
9343 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9344 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9345 }
9346 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9347 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9348 {
9349 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9350 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9351 }
9352 }
9353 }
9354 /* TPR-below threshold/APIC write has the highest priority. */
9355 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9356 {
9357 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9358 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9359 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9360 }
9361 /* MTF takes priority over VMX-preemption timer. */
9362 else
9363 {
9364 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9365 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9366 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9367 }
9368 return rcStrict;
9369}
9370#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9371
9372
9373/**
9374 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9375 * IEMExecOneWithPrefetchedByPC.
9376 *
9377 * Similar code is found in IEMExecLots.
9378 *
9379 * @return Strict VBox status code.
9380 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9381 * @param fExecuteInhibit If set, execute the instruction following CLI,
9382 * POP SS and MOV SS,GR.
9383 * @param pszFunction The calling function name.
9384 */
9385DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9386{
9387 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9388 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9389 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9390 RT_NOREF_PV(pszFunction);
9391
9392#ifdef IEM_WITH_SETJMP
9393 VBOXSTRICTRC rcStrict;
9394 IEM_TRY_SETJMP(pVCpu, rcStrict)
9395 {
9396 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9397 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9398 }
9399 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9400 {
9401 pVCpu->iem.s.cLongJumps++;
9402 }
9403 IEM_CATCH_LONGJMP_END(pVCpu);
9404#else
9405 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9406 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9407#endif
9408 if (rcStrict == VINF_SUCCESS)
9409 pVCpu->iem.s.cInstructions++;
9410 if (pVCpu->iem.s.cActiveMappings > 0)
9411 {
9412 Assert(rcStrict != VINF_SUCCESS);
9413 iemMemRollback(pVCpu);
9414 }
9415 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9416 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9417 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9418
9419//#ifdef DEBUG
9420// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9421//#endif
9422
9423#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9424 /*
9425 * Perform any VMX nested-guest instruction boundary actions.
9426 *
9427 * If any of these causes a VM-exit, we must skip executing the next
9428 * instruction (would run into stale page tables). A VM-exit makes sure
9429 * there is no interrupt-inhibition, so that should ensure we don't go
9430 * on to execute the next instruction. Clearing fExecuteInhibit is
9431 * problematic because of the setjmp/longjmp clobbering above.
9432 */
9433 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9434 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9435 || rcStrict != VINF_SUCCESS)
9436 { /* likely */ }
9437 else
9438 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9439#endif
9440
9441 /* Execute the next instruction as well if a cli, pop ss or
9442 mov ss, Gr has just completed successfully. */
9443 if ( fExecuteInhibit
9444 && rcStrict == VINF_SUCCESS
9445 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9446 {
9447 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9448 if (rcStrict == VINF_SUCCESS)
9449 {
9450#ifdef LOG_ENABLED
9451 iemLogCurInstr(pVCpu, false, pszFunction);
9452#endif
9453#ifdef IEM_WITH_SETJMP
9454 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9455 {
9456 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9457 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9458 }
9459 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9460 {
9461 pVCpu->iem.s.cLongJumps++;
9462 }
9463 IEM_CATCH_LONGJMP_END(pVCpu);
9464#else
9465 IEM_OPCODE_GET_FIRST_U8(&b);
9466 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9467#endif
9468 if (rcStrict == VINF_SUCCESS)
9469 {
9470 pVCpu->iem.s.cInstructions++;
9471#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9472 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9473 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9474 { /* likely */ }
9475 else
9476 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9477#endif
9478 }
9479 if (pVCpu->iem.s.cActiveMappings > 0)
9480 {
9481 Assert(rcStrict != VINF_SUCCESS);
9482 iemMemRollback(pVCpu);
9483 }
9484 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9485 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9486 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9487 }
9488 else if (pVCpu->iem.s.cActiveMappings > 0)
9489 iemMemRollback(pVCpu);
9490 /** @todo drop this after we bake this change into RIP advancing. */
9491 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9492 }
9493
9494 /*
9495 * Return value fiddling, statistics and sanity assertions.
9496 */
9497 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9498
9499 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9500 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9501 return rcStrict;
9502}
9503
9504
9505/**
9506 * Execute one instruction.
9507 *
9508 * @return Strict VBox status code.
9509 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9510 */
9511VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9512{
9513 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9514#ifdef LOG_ENABLED
9515 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9516#endif
9517
9518 /*
9519 * Do the decoding and emulation.
9520 */
9521 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9522 if (rcStrict == VINF_SUCCESS)
9523 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9524 else if (pVCpu->iem.s.cActiveMappings > 0)
9525 iemMemRollback(pVCpu);
9526
9527 if (rcStrict != VINF_SUCCESS)
9528 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9529 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9530 return rcStrict;
9531}
9532
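/**
 * Illustrative sketch: a minimal, hypothetical caller driving IEMExecOne from
 * the owning EMT and simply propagating the strict status code.  The wrapper
 * name and the (lack of) error handling are example assumptions, not the
 * actual EM logic.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleExecOneStep(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("iemExampleExecOneStep: IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
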
9533
9534VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9535{
9536 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9537 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9538 if (rcStrict == VINF_SUCCESS)
9539 {
9540 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9541 if (pcbWritten)
9542 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9543 }
9544 else if (pVCpu->iem.s.cActiveMappings > 0)
9545 iemMemRollback(pVCpu);
9546
9547 return rcStrict;
9548}
9549
9550
9551VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9552 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9553{
9554 VBOXSTRICTRC rcStrict;
9555 if ( cbOpcodeBytes
9556 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9557 {
9558 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9559#ifdef IEM_WITH_CODE_TLB
9560 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9561 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9562 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9563 pVCpu->iem.s.offCurInstrStart = 0;
9564 pVCpu->iem.s.offInstrNextByte = 0;
9565 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9566#else
9567 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9568 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9569#endif
9570 rcStrict = VINF_SUCCESS;
9571 }
9572 else
9573 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9574 if (rcStrict == VINF_SUCCESS)
9575 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9576 else if (pVCpu->iem.s.cActiveMappings > 0)
9577 iemMemRollback(pVCpu);
9578
9579 return rcStrict;
9580}
9581
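/**
 * Illustrative sketch: calling IEMExecOneWithPrefetchedByPC with opcode bytes
 * the caller has already fetched.  The wrapper and its parameters are
 * hypothetical; the one requirement visible above is that the prefetched
 * bytes are only used when @a GCPtrPC matches the current guest RIP,
 * otherwise the normal opcode prefetch path is taken.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleExecOnePrefetched(PVMCPUCC pVCpu, uint64_t GCPtrPC,
                                                     const uint8_t *pabOpcodes, size_t cbOpcodes)
{
    return IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrPC, pabOpcodes, cbOpcodes);
}
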
9582
9583VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9584{
9585 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9586 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9587 if (rcStrict == VINF_SUCCESS)
9588 {
9589 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9590 if (pcbWritten)
9591 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9592 }
9593 else if (pVCpu->iem.s.cActiveMappings > 0)
9594 iemMemRollback(pVCpu);
9595
9596 return rcStrict;
9597}
9598
9599
9600VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9601 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9602{
9603 VBOXSTRICTRC rcStrict;
9604 if ( cbOpcodeBytes
9605 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9606 {
9607 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9608#ifdef IEM_WITH_CODE_TLB
9609 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9610 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9611 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9612 pVCpu->iem.s.offCurInstrStart = 0;
9613 pVCpu->iem.s.offInstrNextByte = 0;
9614 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9615#else
9616 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9617 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9618#endif
9619 rcStrict = VINF_SUCCESS;
9620 }
9621 else
9622 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9623 if (rcStrict == VINF_SUCCESS)
9624 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9625 else if (pVCpu->iem.s.cActiveMappings > 0)
9626 iemMemRollback(pVCpu);
9627
9628 return rcStrict;
9629}
9630
9631
9632/**
9633 * For handling split cacheline lock operations when the host has split-lock
9634 * detection enabled.
9635 *
9636 * This will cause the interpreter to disregard the lock prefix and implicit
9637 * locking (xchg).
9638 *
9639 * @returns Strict VBox status code.
9640 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9641 */
9642VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9643{
9644 /*
9645 * Do the decoding and emulation.
9646 */
9647 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9648 if (rcStrict == VINF_SUCCESS)
9649 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9650 else if (pVCpu->iem.s.cActiveMappings > 0)
9651 iemMemRollback(pVCpu);
9652
9653 if (rcStrict != VINF_SUCCESS)
9654 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9655 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9656 return rcStrict;
9657}
9658
9659
9660/**
9661 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9662 * inject a pending TRPM trap.
9663 */
9664VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9665{
9666 Assert(TRPMHasTrap(pVCpu));
9667
9668 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9669 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9670 {
9671 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9672#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9673 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9674 if (fIntrEnabled)
9675 {
9676 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9677 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9678 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9679 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9680 else
9681 {
9682 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9683 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9684 }
9685 }
9686#else
9687 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9688#endif
9689 if (fIntrEnabled)
9690 {
9691 uint8_t u8TrapNo;
9692 TRPMEVENT enmType;
9693 uint32_t uErrCode;
9694 RTGCPTR uCr2;
9695 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9696 AssertRC(rc2);
9697 Assert(enmType == TRPM_HARDWARE_INT);
9698 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9699
9700 TRPMResetTrap(pVCpu);
9701
9702#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9703 /* Injecting an event may cause a VM-exit. */
9704 if ( rcStrict != VINF_SUCCESS
9705 && rcStrict != VINF_IEM_RAISED_XCPT)
9706 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9707#else
9708 NOREF(rcStrict);
9709#endif
9710 }
9711 }
9712
9713 return VINF_SUCCESS;
9714}
9715
9716
9717VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9718{
9719 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9720 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9721 Assert(cMaxInstructions > 0);
9722
9723 /*
9724 * See if there is an interrupt pending in TRPM, inject it if we can.
9725 */
9726 /** @todo What if we are injecting an exception and not an interrupt? Is that
9727 * possible here? For now we assert it is indeed only an interrupt. */
9728 if (!TRPMHasTrap(pVCpu))
9729 { /* likely */ }
9730 else
9731 {
9732 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9733 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9734 { /*likely */ }
9735 else
9736 return rcStrict;
9737 }
9738
9739 /*
9740 * Initial decoder init w/ prefetch, then setup setjmp.
9741 */
9742 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9743 if (rcStrict == VINF_SUCCESS)
9744 {
9745#ifdef IEM_WITH_SETJMP
9746 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9747 IEM_TRY_SETJMP(pVCpu, rcStrict)
9748#endif
9749 {
9750 /*
9751 * The run loop. We limit ourselves to the caller-specified instruction count (cMaxInstructions).
9752 */
9753 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9754 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9755 for (;;)
9756 {
9757 /*
9758 * Log the state.
9759 */
9760#ifdef LOG_ENABLED
9761 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9762#endif
9763
9764 /*
9765 * Do the decoding and emulation.
9766 */
9767 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9768 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9769#ifdef VBOX_STRICT
9770 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9771#endif
9772 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9773 {
9774 Assert(pVCpu->iem.s.cActiveMappings == 0);
9775 pVCpu->iem.s.cInstructions++;
9776
9777#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9778 /* Perform any VMX nested-guest instruction boundary actions. */
9779 uint64_t fCpu = pVCpu->fLocalForcedActions;
9780 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9781 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9782 { /* likely */ }
9783 else
9784 {
9785 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9786 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9787 fCpu = pVCpu->fLocalForcedActions;
9788 else
9789 {
9790 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9791 break;
9792 }
9793 }
9794#endif
9795 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9796 {
9797#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9798 uint64_t fCpu = pVCpu->fLocalForcedActions;
9799#endif
9800 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9801 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9802 | VMCPU_FF_TLB_FLUSH
9803 | VMCPU_FF_UNHALT );
9804
9805 if (RT_LIKELY( ( !fCpu
9806 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9807 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9808 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9809 {
9810 if (--cMaxInstructionsGccStupidity > 0)
9811 {
9812 /* Poll timers every now and then according to the caller's specs. */
9813 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9814 || !TMTimerPollBool(pVM, pVCpu))
9815 {
9816 Assert(pVCpu->iem.s.cActiveMappings == 0);
9817 iemReInitDecoder(pVCpu);
9818 continue;
9819 }
9820 }
9821 }
9822 }
9823 Assert(pVCpu->iem.s.cActiveMappings == 0);
9824 }
9825 else if (pVCpu->iem.s.cActiveMappings > 0)
9826 iemMemRollback(pVCpu);
9827 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9828 break;
9829 }
9830 }
9831#ifdef IEM_WITH_SETJMP
9832 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9833 {
9834 if (pVCpu->iem.s.cActiveMappings > 0)
9835 iemMemRollback(pVCpu);
9836# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9837 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9838# endif
9839 pVCpu->iem.s.cLongJumps++;
9840 }
9841 IEM_CATCH_LONGJMP_END(pVCpu);
9842#endif
9843
9844 /*
9845 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9846 */
9847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9849 }
9850 else
9851 {
9852 if (pVCpu->iem.s.cActiveMappings > 0)
9853 iemMemRollback(pVCpu);
9854
9855#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9856 /*
9857 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9858 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9859 */
9860 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9861#endif
9862 }
9863
9864 /*
9865 * Log the result if it isn't VINF_SUCCESS.
9866 */
9867 if (rcStrict != VINF_SUCCESS)
9868 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9869 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9870 if (pcInstructions)
9871 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9872 return rcStrict;
9873}
9874
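/**
 * Illustrative sketch: one possible IEMExecLots invocation.  The concrete
 * numbers are arbitrary example values; the only real constraint, per the
 * assertion at the top of the function, is that cPollRate plus one must be a
 * power of two because it is used as a mask for the timer polling interval.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleExecBatch(PVMCPUCC pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate: 2^9 - 1*/, &cInstructions);
    LogFlow(("iemExampleExecBatch: %u instructions -> %Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
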
9875
9876/**
9877 * Interface used by EMExecuteExec, does exit statistics and limits.
9878 *
9879 * @returns Strict VBox status code.
9880 * @param pVCpu The cross context virtual CPU structure.
9881 * @param fWillExit To be defined.
9882 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9883 * @param cMaxInstructions Maximum number of instructions to execute.
9884 * @param cMaxInstructionsWithoutExits
9885 * The max number of instructions without exits.
9886 * @param pStats Where to return statistics.
9887 */
9888VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9889 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9890{
9891 NOREF(fWillExit); /** @todo define flexible exit crits */
9892
9893 /*
9894 * Initialize return stats.
9895 */
9896 pStats->cInstructions = 0;
9897 pStats->cExits = 0;
9898 pStats->cMaxExitDistance = 0;
9899 pStats->cReserved = 0;
9900
9901 /*
9902 * Initial decoder init w/ prefetch, then setup setjmp.
9903 */
9904 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9905 if (rcStrict == VINF_SUCCESS)
9906 {
9907#ifdef IEM_WITH_SETJMP
9908 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9909 IEM_TRY_SETJMP(pVCpu, rcStrict)
9910#endif
9911 {
9912#ifdef IN_RING0
9913 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9914#endif
9915 uint32_t cInstructionSinceLastExit = 0;
9916
9917 /*
9918 * The run loop. We limit ourselves to the caller-specified instruction limits.
9919 */
9920 PVM pVM = pVCpu->CTX_SUFF(pVM);
9921 for (;;)
9922 {
9923 /*
9924 * Log the state.
9925 */
9926#ifdef LOG_ENABLED
9927 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9928#endif
9929
9930 /*
9931 * Do the decoding and emulation.
9932 */
9933 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9934
9935 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9936 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9937
9938 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9939 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9940 {
9941 pStats->cExits += 1;
9942 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9943 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9944 cInstructionSinceLastExit = 0;
9945 }
9946
9947 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9948 {
9949 Assert(pVCpu->iem.s.cActiveMappings == 0);
9950 pVCpu->iem.s.cInstructions++;
9951 pStats->cInstructions++;
9952 cInstructionSinceLastExit++;
9953
9954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9955 /* Perform any VMX nested-guest instruction boundary actions. */
9956 uint64_t fCpu = pVCpu->fLocalForcedActions;
9957 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9958 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9959 { /* likely */ }
9960 else
9961 {
9962 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9963 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9964 fCpu = pVCpu->fLocalForcedActions;
9965 else
9966 {
9967 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9968 break;
9969 }
9970 }
9971#endif
9972 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9973 {
9974#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9975 uint64_t fCpu = pVCpu->fLocalForcedActions;
9976#endif
9977 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9978 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9979 | VMCPU_FF_TLB_FLUSH
9980 | VMCPU_FF_UNHALT );
9981 if (RT_LIKELY( ( ( !fCpu
9982 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9983 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9984 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9985 || pStats->cInstructions < cMinInstructions))
9986 {
9987 if (pStats->cInstructions < cMaxInstructions)
9988 {
9989 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9990 {
9991#ifdef IN_RING0
9992 if ( !fCheckPreemptionPending
9993 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9994#endif
9995 {
9996 Assert(pVCpu->iem.s.cActiveMappings == 0);
9997 iemReInitDecoder(pVCpu);
9998 continue;
9999 }
10000#ifdef IN_RING0
10001 rcStrict = VINF_EM_RAW_INTERRUPT;
10002 break;
10003#endif
10004 }
10005 }
10006 }
10007 Assert(!(fCpu & VMCPU_FF_IEM));
10008 }
10009 Assert(pVCpu->iem.s.cActiveMappings == 0);
10010 }
10011 else if (pVCpu->iem.s.cActiveMappings > 0)
10012 iemMemRollback(pVCpu);
10013 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10014 break;
10015 }
10016 }
10017#ifdef IEM_WITH_SETJMP
10018 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10019 {
10020 if (pVCpu->iem.s.cActiveMappings > 0)
10021 iemMemRollback(pVCpu);
10022 pVCpu->iem.s.cLongJumps++;
10023 }
10024 IEM_CATCH_LONGJMP_END(pVCpu);
10025#endif
10026
10027 /*
10028 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10029 */
10030 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10031 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10032 }
10033 else
10034 {
10035 if (pVCpu->iem.s.cActiveMappings > 0)
10036 iemMemRollback(pVCpu);
10037
10038#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10039 /*
10040 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10041 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10042 */
10043 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10044#endif
10045 }
10046
10047 /*
10048 * Log the result and exit statistics if it isn't VINF_SUCCESS.
10049 */
10050 if (rcStrict != VINF_SUCCESS)
10051 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10052 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10053 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10054 return rcStrict;
10055}
10056
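/**
 * Illustrative sketch: calling IEMExecForExits with arbitrary example limits
 * and reading back the statistics it fills in.  The wrapper and the limit
 * values are hypothetical; the IEMEXECFOREXITSTATS fields used are the ones
 * initialized by the function above.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 32 /*cMinInstructions*/, 2048 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("iemExampleExecForExits: ins=%u exits=%u maxdist=%u -> %Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
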
10057
10058/**
10059 * Injects a trap, fault, abort, software interrupt or external interrupt.
10060 *
10061 * The parameter list matches TRPMQueryTrapAll pretty closely.
10062 *
10063 * @returns Strict VBox status code.
10064 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10065 * @param u8TrapNo The trap number.
10066 * @param enmType What type is it (trap/fault/abort), software
10067 * interrupt or hardware interrupt.
10068 * @param uErrCode The error code if applicable.
10069 * @param uCr2 The CR2 value if applicable.
10070 * @param cbInstr The instruction length (only relevant for
10071 * software interrupts).
10072 */
10073VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10074 uint8_t cbInstr)
10075{
10076 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10077#ifdef DBGFTRACE_ENABLED
10078 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10079 u8TrapNo, enmType, uErrCode, uCr2);
10080#endif
10081
10082 uint32_t fFlags;
10083 switch (enmType)
10084 {
10085 case TRPM_HARDWARE_INT:
10086 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10087 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10088 uErrCode = uCr2 = 0;
10089 break;
10090
10091 case TRPM_SOFTWARE_INT:
10092 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10093 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10094 uErrCode = uCr2 = 0;
10095 break;
10096
10097 case TRPM_TRAP:
10098 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
10099 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10100 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10101 if (u8TrapNo == X86_XCPT_PF)
10102 fFlags |= IEM_XCPT_FLAGS_CR2;
10103 switch (u8TrapNo)
10104 {
10105 case X86_XCPT_DF:
10106 case X86_XCPT_TS:
10107 case X86_XCPT_NP:
10108 case X86_XCPT_SS:
10109 case X86_XCPT_PF:
10110 case X86_XCPT_AC:
10111 case X86_XCPT_GP:
10112 fFlags |= IEM_XCPT_FLAGS_ERR;
10113 break;
10114 }
10115 break;
10116
10117 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10118 }
10119
10120 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10121
10122 if (pVCpu->iem.s.cActiveMappings > 0)
10123 iemMemRollback(pVCpu);
10124
10125 return rcStrict;
10126}
10127
10128
10129/**
10130 * Injects the active TRPM event.
10131 *
10132 * @returns Strict VBox status code.
10133 * @param pVCpu The cross context virtual CPU structure.
10134 */
10135VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10136{
10137#ifndef IEM_IMPLEMENTS_TASKSWITCH
10138 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10139#else
10140 uint8_t u8TrapNo;
10141 TRPMEVENT enmType;
10142 uint32_t uErrCode;
10143 RTGCUINTPTR uCr2;
10144 uint8_t cbInstr;
10145 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10146 if (RT_FAILURE(rc))
10147 return rc;
10148
10149 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10150 * ICEBP \#DB injection as a special case. */
10151 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10152#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10153 if (rcStrict == VINF_SVM_VMEXIT)
10154 rcStrict = VINF_SUCCESS;
10155#endif
10156#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10157 if (rcStrict == VINF_VMX_VMEXIT)
10158 rcStrict = VINF_SUCCESS;
10159#endif
10160 /** @todo Are there any other codes that imply the event was successfully
10161 * delivered to the guest? See @bugref{6607}. */
10162 if ( rcStrict == VINF_SUCCESS
10163 || rcStrict == VINF_IEM_RAISED_XCPT)
10164 TRPMResetTrap(pVCpu);
10165
10166 return rcStrict;
10167#endif
10168}
10169
10170
10171VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10172{
10173 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10174 return VERR_NOT_IMPLEMENTED;
10175}
10176
10177
10178VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10179{
10180 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10181 return VERR_NOT_IMPLEMENTED;
10182}
10183
10184
10185/**
10186 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10187 *
10188 * This API ASSUMES that the caller has already verified that the guest code is
10189 * allowed to access the I/O port. (The I/O port is in the DX register in the
10190 * guest state.)
10191 *
10192 * @returns Strict VBox status code.
10193 * @param pVCpu The cross context virtual CPU structure.
10194 * @param cbValue The size of the I/O port access (1, 2, or 4).
10195 * @param enmAddrMode The addressing mode.
10196 * @param fRepPrefix Indicates whether a repeat prefix is used
10197 * (doesn't matter which for this instruction).
10198 * @param cbInstr The instruction length in bytes.
10199 * @param iEffSeg The effective segment register number (index).
10200 * @param fIoChecked Whether the access to the I/O port has been
10201 * checked or not. It's typically checked in the
10202 * HM scenario.
10203 */
10204VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10205 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10206{
10207 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10208 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10209
10210 /*
10211 * State init.
10212 */
10213 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10214
10215 /*
10216 * Switch orgy for getting to the right handler.
10217 */
10218 VBOXSTRICTRC rcStrict;
10219 if (fRepPrefix)
10220 {
10221 switch (enmAddrMode)
10222 {
10223 case IEMMODE_16BIT:
10224 switch (cbValue)
10225 {
10226 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10227 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10228 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10229 default:
10230 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10231 }
10232 break;
10233
10234 case IEMMODE_32BIT:
10235 switch (cbValue)
10236 {
10237 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10238 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10239 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10240 default:
10241 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10242 }
10243 break;
10244
10245 case IEMMODE_64BIT:
10246 switch (cbValue)
10247 {
10248 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10249 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10250 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10251 default:
10252 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10253 }
10254 break;
10255
10256 default:
10257 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10258 }
10259 }
10260 else
10261 {
10262 switch (enmAddrMode)
10263 {
10264 case IEMMODE_16BIT:
10265 switch (cbValue)
10266 {
10267 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10268 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10269 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10270 default:
10271 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10272 }
10273 break;
10274
10275 case IEMMODE_32BIT:
10276 switch (cbValue)
10277 {
10278 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10279 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10280 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10281 default:
10282 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10283 }
10284 break;
10285
10286 case IEMMODE_64BIT:
10287 switch (cbValue)
10288 {
10289 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10290 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10291 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10292 default:
10293 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10294 }
10295 break;
10296
10297 default:
10298 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10299 }
10300 }
10301
10302 if (pVCpu->iem.s.cActiveMappings)
10303 iemMemRollback(pVCpu);
10304
10305 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10306}
10307
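/**
 * Illustrative sketch: how an HM-style caller might use IEMExecStringIoWrite
 * after decoding a string OUT exit and verifying that the guest may access
 * the I/O port.  All concrete values below (operand size, address mode, rep
 * prefix, instruction length, segment) are hypothetical stand-ins for
 * whatever the caller's exit information actually provides.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleStringIoWrite(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu,
                                2 /*cbValue: word-sized accesses*/,
                                IEMMODE_32BIT /*enmAddrMode*/,
                                true /*fRepPrefix*/,
                                3 /*cbInstr: hypothetical length, e.g. rep + operand-size prefix + opcode*/,
                                X86_SREG_DS /*iEffSeg*/,
                                true /*fIoChecked: already validated by the caller*/);
}
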
10308
10309/**
10310 * Interface for HM and EM for executing string I/O IN (read) instructions.
10311 *
10312 * This API ASSUMES that the caller has already verified that the guest code is
10313 * allowed to access the I/O port. (The I/O port is in the DX register in the
10314 * guest state.)
10315 *
10316 * @returns Strict VBox status code.
10317 * @param pVCpu The cross context virtual CPU structure.
10318 * @param cbValue The size of the I/O port access (1, 2, or 4).
10319 * @param enmAddrMode The addressing mode.
10320 * @param fRepPrefix Indicates whether a repeat prefix is used
10321 * (doesn't matter which for this instruction).
10322 * @param cbInstr The instruction length in bytes.
10323 * @param fIoChecked Whether the access to the I/O port has been
10324 * checked or not. It's typically checked in the
10325 * HM scenario.
10326 */
10327VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10328 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10329{
10330 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10331
10332 /*
10333 * State init.
10334 */
10335 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10336
10337 /*
10338 * Switch orgy for getting to the right handler.
10339 */
10340 VBOXSTRICTRC rcStrict;
10341 if (fRepPrefix)
10342 {
10343 switch (enmAddrMode)
10344 {
10345 case IEMMODE_16BIT:
10346 switch (cbValue)
10347 {
10348 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10349 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10350 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10351 default:
10352 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10353 }
10354 break;
10355
10356 case IEMMODE_32BIT:
10357 switch (cbValue)
10358 {
10359 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10360 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10361 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10362 default:
10363 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10364 }
10365 break;
10366
10367 case IEMMODE_64BIT:
10368 switch (cbValue)
10369 {
10370 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10371 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10372 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10373 default:
10374 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10375 }
10376 break;
10377
10378 default:
10379 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10380 }
10381 }
10382 else
10383 {
10384 switch (enmAddrMode)
10385 {
10386 case IEMMODE_16BIT:
10387 switch (cbValue)
10388 {
10389 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10390 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10391 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10392 default:
10393 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10394 }
10395 break;
10396
10397 case IEMMODE_32BIT:
10398 switch (cbValue)
10399 {
10400 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10401 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10402 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10403 default:
10404 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10405 }
10406 break;
10407
10408 case IEMMODE_64BIT:
10409 switch (cbValue)
10410 {
10411 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10412 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10413 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10414 default:
10415 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10416 }
10417 break;
10418
10419 default:
10420 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10421 }
10422 }
10423
10424 if ( pVCpu->iem.s.cActiveMappings == 0
10425 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10426 { /* likely */ }
10427 else
10428 {
10429 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10430 iemMemRollback(pVCpu);
10431 }
10432 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10433}
10434
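/**
 * Minimal usage sketch for IEMExecStringIoRead.
 *
 * Shows how an exit handler might forward a decoded INS / REP INS to the
 * interface above.  The function name and the already-decoded parameters
 * (cbValue, enmAddrMode, fRepPrefix, cbInstr) are illustrative assumptions;
 * the real callers live in the HM/EM code, not in this file.
 */
static VBOXSTRICTRC iemExampleStringIoReadUsage(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                                bool fRepPrefix, uint8_t cbInstr)
{
    /* The I/O permission check is assumed to have been done by the caller, hence fIoChecked=true. */
    VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRepPrefix, cbInstr, true /*fIoChecked*/);
    Log(("iemExampleStringIoReadUsage: rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
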
10435
10436/**
10437 * Interface for rawmode to execute an OUT instruction.
10438 *
10439 * @returns Strict VBox status code.
10440 * @param pVCpu The cross context virtual CPU structure.
10441 * @param cbInstr The instruction length in bytes.
10442 * @param u16Port The port to write to.
10443 * @param fImm Whether the port is specified using an immediate operand or
10444 * using the implicit DX register.
10445 * @param cbReg The register size.
10446 *
10447 * @remarks In ring-0 not all of the state needs to be synced in.
10448 */
10449VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10450{
10451 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10452 Assert(cbReg <= 4 && cbReg != 3);
10453
10454 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10455 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10456 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10457 Assert(!pVCpu->iem.s.cActiveMappings);
10458 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10459}
10460
10461
10462/**
10463 * Interface for rawmode to execute an IN instruction.
10464 *
10465 * @returns Strict VBox status code.
10466 * @param pVCpu The cross context virtual CPU structure.
10467 * @param cbInstr The instruction length in bytes.
10468 * @param u16Port The port to read.
10469 * @param fImm Whether the port is specified using an immediate operand or
10470 * using the implicit DX register.
10471 * @param cbReg The register size.
10472 */
10473VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10474{
10475 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10476 Assert(cbReg <= 4 && cbReg != 3);
10477
10478 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10479 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10480 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10481 Assert(!pVCpu->iem.s.cActiveMappings);
10482 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10483}
10484
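/**
 * Minimal usage sketch for IEMExecDecodedOut / IEMExecDecodedIn.
 *
 * Illustrates emulating the one-byte "OUT DX, AL" / "IN AL, DX" forms via the
 * interfaces above; the port number, register width and the choice of the
 * DX-relative encoding are illustrative assumptions.
 */
static VBOXSTRICTRC iemExamplePortIoUsage(PVMCPUCC pVCpu, uint16_t u16Port, bool fWrite)
{
    /* cbInstr=1 matches the single-byte 0xEE/0xEC encodings; cbReg=1 selects an 8-bit access (AL). */
    uint8_t const cbInstr = 1;
    uint8_t const cbReg   = 1;
    return fWrite
         ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false /*fImm*/, cbReg)
         : IEMExecDecodedIn( pVCpu, cbInstr, u16Port, false /*fImm*/, cbReg);
}
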
10485
10486/**
10487 * Interface for HM and EM to write to a CRx register.
10488 *
10489 * @returns Strict VBox status code.
10490 * @param pVCpu The cross context virtual CPU structure.
10491 * @param cbInstr The instruction length in bytes.
10492 * @param iCrReg The control register number (destination).
10493 * @param iGReg The general purpose register number (source).
10494 *
10495 * @remarks In ring-0 not all of the state needs to be synced in.
10496 */
10497VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10498{
10499 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10500 Assert(iCrReg < 16);
10501 Assert(iGReg < 16);
10502
10503 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10504 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10505 Assert(!pVCpu->iem.s.cActiveMappings);
10506 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10507}
10508
10509
10510/**
10511 * Interface for HM and EM to read from a CRx register.
10512 *
10513 * @returns Strict VBox status code.
10514 * @param pVCpu The cross context virtual CPU structure.
10515 * @param cbInstr The instruction length in bytes.
10516 * @param iGReg The general purpose register number (destination).
10517 * @param iCrReg The control register number (source).
10518 *
10519 * @remarks In ring-0 not all of the state needs to be synced in.
10520 */
10521VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10522{
10523 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10524 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10525 | CPUMCTX_EXTRN_APIC_TPR);
10526 Assert(iCrReg < 16);
10527 Assert(iGReg < 16);
10528
10529 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10530 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10531 Assert(!pVCpu->iem.s.cActiveMappings);
10532 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10533}
10534
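/**
 * Minimal usage sketch for the MOV CRx interfaces above.
 *
 * Emulates "mov cr3, rax" followed by "mov rbx, cr3"; the register indices and
 * the 3-byte instruction length are illustrative assumptions only.
 */
static VBOXSTRICTRC iemExampleMovCrxUsage(PVMCPUCC pVCpu)
{
    /* MOV CR3 <- RAX (iGReg 0 = RAX). */
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* MOV RBX <- CR3 (iGReg 3 = RBX). */
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, 3 /*iGReg*/, 3 /*iCrReg*/);
}
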
10535
10536/**
10537 * Interface for HM and EM to write to a DRx register.
10538 *
10539 * @returns Strict VBox status code.
10540 * @param pVCpu The cross context virtual CPU structure.
10541 * @param cbInstr The instruction length in bytes.
10542 * @param iDrReg The debug register number (destination).
10543 * @param iGReg The general purpose register number (source).
10544 *
10545 * @remarks In ring-0 not all of the state needs to be synced in.
10546 */
10547VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10548{
10549 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10550 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10551 Assert(iDrReg < 8);
10552 Assert(iGReg < 16);
10553
10554 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10555 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10556 Assert(!pVCpu->iem.s.cActiveMappings);
10557 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10558}
10559
10560
10561/**
10562 * Interface for HM and EM to read from a DRx register.
10563 *
10564 * @returns Strict VBox status code.
10565 * @param pVCpu The cross context virtual CPU structure.
10566 * @param cbInstr The instruction length in bytes.
10567 * @param iGReg The general purpose register number (destination).
10568 * @param iDrReg The debug register number (source).
10569 *
10570 * @remarks In ring-0 not all of the state needs to be synced in.
10571 */
10572VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10573{
10574 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10575 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10576 Assert(iDrReg < 8);
10577 Assert(iGReg < 16);
10578
10579 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10580 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10581 Assert(!pVCpu->iem.s.cActiveMappings);
10582 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10583}
10584
10585
10586/**
10587 * Interface for HM and EM to clear the CR0[TS] bit.
10588 *
10589 * @returns Strict VBox status code.
10590 * @param pVCpu The cross context virtual CPU structure.
10591 * @param cbInstr The instruction length in bytes.
10592 *
10593 * @remarks In ring-0 not all of the state needs to be synced in.
10594 */
10595VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10596{
10597 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10598
10599 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10600 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10601 Assert(!pVCpu->iem.s.cActiveMappings);
10602 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10603}
10604
10605
10606/**
10607 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10608 *
10609 * @returns Strict VBox status code.
10610 * @param pVCpu The cross context virtual CPU structure.
10611 * @param cbInstr The instruction length in bytes.
10612 * @param uValue The value to load into CR0.
10613 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10614 * memory operand. Otherwise pass NIL_RTGCPTR.
10615 *
10616 * @remarks In ring-0 not all of the state needs to be synced in.
10617 */
10618VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10619{
10620 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10621
10622 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10623 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10624 Assert(!pVCpu->iem.s.cActiveMappings);
10625 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10626}
10627
10628
10629/**
10630 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10631 *
10632 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10633 *
10634 * @returns Strict VBox status code.
10635 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10636 * @param cbInstr The instruction length in bytes.
10637 * @remarks In ring-0 not all of the state needs to be synced in.
10638 * @thread EMT(pVCpu)
10639 */
10640VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10641{
10642 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10643
10644 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10645 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10646 Assert(!pVCpu->iem.s.cActiveMappings);
10647 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10648}
10649
10650
10651/**
10652 * Interface for HM and EM to emulate the WBINVD instruction.
10653 *
10654 * @returns Strict VBox status code.
10655 * @param pVCpu The cross context virtual CPU structure.
10656 * @param cbInstr The instruction length in bytes.
10657 *
10658 * @remarks In ring-0 not all of the state needs to be synced in.
10659 */
10660VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10661{
10662 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10663
10664 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10665 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10666 Assert(!pVCpu->iem.s.cActiveMappings);
10667 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10668}
10669
10670
10671/**
10672 * Interface for HM and EM to emulate the INVD instruction.
10673 *
10674 * @returns Strict VBox status code.
10675 * @param pVCpu The cross context virtual CPU structure.
10676 * @param cbInstr The instruction length in bytes.
10677 *
10678 * @remarks In ring-0 not all of the state needs to be synced in.
10679 */
10680VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10681{
10682 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10683
10684 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10685 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10686 Assert(!pVCpu->iem.s.cActiveMappings);
10687 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10688}
10689
10690
10691/**
10692 * Interface for HM and EM to emulate the INVLPG instruction.
10693 *
10694 * @returns Strict VBox status code.
10695 * @retval VINF_PGM_SYNC_CR3
10696 *
10697 * @param pVCpu The cross context virtual CPU structure.
10698 * @param cbInstr The instruction length in bytes.
10699 * @param GCPtrPage The effective address of the page to invalidate.
10700 *
10701 * @remarks In ring-0 not all of the state needs to be synced in.
10702 */
10703VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10704{
10705 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10706
10707 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10708 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10709 Assert(!pVCpu->iem.s.cActiveMappings);
10710 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10711}
10712
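/**
 * Minimal usage sketch for IEMExecDecodedInvlpg.
 *
 * Shows one way a caller might treat the informational VINF_PGM_SYNC_CR3
 * status (an assumption; real callers may simply propagate it).  The page
 * address and the 3-byte instruction length are illustrative.
 */
static VBOXSTRICTRC iemExampleInvlpgUsage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, 3 /*cbInstr*/, GCPtrPage);
    if (rcStrict == VINF_PGM_SYNC_CR3)
    {
        /* Informational: the shadow page tables need resyncing; not an error. */
        Log(("iemExampleInvlpgUsage: VINF_PGM_SYNC_CR3 for %RGv\n", GCPtrPage));
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
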
10713
10714/**
10715 * Interface for HM and EM to emulate the INVPCID instruction.
10716 *
10717 * @returns Strict VBox status code.
10718 * @retval VINF_PGM_SYNC_CR3
10719 *
10720 * @param pVCpu The cross context virtual CPU structure.
10721 * @param cbInstr The instruction length in bytes.
10722 * @param iEffSeg The effective segment register.
10723 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10724 * @param uType The invalidation type.
10725 *
10726 * @remarks In ring-0 not all of the state needs to be synced in.
10727 */
10728VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10729 uint64_t uType)
10730{
10731 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10732
10733 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10734 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10735 Assert(!pVCpu->iem.s.cActiveMappings);
10736 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10737}
10738
10739
10740/**
10741 * Interface for HM and EM to emulate the CPUID instruction.
10742 *
10743 * @returns Strict VBox status code.
10744 *
10745 * @param pVCpu The cross context virtual CPU structure.
10746 * @param cbInstr The instruction length in bytes.
10747 *
10748 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
10749 */
10750VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10751{
10752 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10753 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10754
10755 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10756 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10757 Assert(!pVCpu->iem.s.cActiveMappings);
10758 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10759}
10760
10761
10762/**
10763 * Interface for HM and EM to emulate the RDPMC instruction.
10764 *
10765 * @returns Strict VBox status code.
10766 *
10767 * @param pVCpu The cross context virtual CPU structure.
10768 * @param cbInstr The instruction length in bytes.
10769 *
10770 * @remarks Not all of the state needs to be synced in.
10771 */
10772VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10773{
10774 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10775 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10776
10777 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10778 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10779 Assert(!pVCpu->iem.s.cActiveMappings);
10780 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10781}
10782
10783
10784/**
10785 * Interface for HM and EM to emulate the RDTSC instruction.
10786 *
10787 * @returns Strict VBox status code.
10788 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10789 *
10790 * @param pVCpu The cross context virtual CPU structure.
10791 * @param cbInstr The instruction length in bytes.
10792 *
10793 * @remarks Not all of the state needs to be synced in.
10794 */
10795VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10796{
10797 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10798 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10799
10800 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10801 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10802 Assert(!pVCpu->iem.s.cActiveMappings);
10803 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10804}
10805
10806
10807/**
10808 * Interface for HM and EM to emulate the RDTSCP instruction.
10809 *
10810 * @returns Strict VBox status code.
10811 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10812 *
10813 * @param pVCpu The cross context virtual CPU structure.
10814 * @param cbInstr The instruction length in bytes.
10815 *
10816 * @remarks Not all of the state needs to be synced in. Recommended
10817 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
10818 */
10819VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10820{
10821 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10822 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10823
10824 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10825 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10826 Assert(!pVCpu->iem.s.cActiveMappings);
10827 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10828}
10829
10830
10831/**
10832 * Interface for HM and EM to emulate the RDMSR instruction.
10833 *
10834 * @returns Strict VBox status code.
10835 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10836 *
10837 * @param pVCpu The cross context virtual CPU structure.
10838 * @param cbInstr The instruction length in bytes.
10839 *
10840 * @remarks Not all of the state needs to be synced in. Requires RCX and
10841 * (currently) all MSRs.
10842 */
10843VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10844{
10845 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10846 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10847
10848 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10849 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10850 Assert(!pVCpu->iem.s.cActiveMappings);
10851 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10852}
10853
10854
10855/**
10856 * Interface for HM and EM to emulate the WRMSR instruction.
10857 *
10858 * @returns Strict VBox status code.
10859 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10860 *
10861 * @param pVCpu The cross context virtual CPU structure.
10862 * @param cbInstr The instruction length in bytes.
10863 *
10864 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10865 * and (currently) all MSRs.
10866 */
10867VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10868{
10869 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10870 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10871 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10872
10873 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10874 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10875 Assert(!pVCpu->iem.s.cActiveMappings);
10876 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10877}
10878
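/**
 * Minimal usage sketch for IEMExecDecodedRdmsr / IEMExecDecodedWrmsr.
 *
 * The interfaces above assert that RCX (and RAX/RDX for WRMSR) plus the MSR
 * state have been imported into the CPUM context before the call; how that
 * import is performed is up to the caller and is not shown here.
 */
static VBOXSTRICTRC iemExampleMsrUsage(PVMCPUCC pVCpu, bool fWrite)
{
    /* Both instructions use the 2-byte 0F 30 (WRMSR) / 0F 32 (RDMSR) encodings. */
    return fWrite
         ? IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/)
         : IEMExecDecodedRdmsr(pVCpu, 2 /*cbInstr*/);
}
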
10879
10880/**
10881 * Interface for HM and EM to emulate the MONITOR instruction.
10882 *
10883 * @returns Strict VBox status code.
10884 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10885 *
10886 * @param pVCpu The cross context virtual CPU structure.
10887 * @param cbInstr The instruction length in bytes.
10888 *
10889 * @remarks Not all of the state needs to be synced in.
10890 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10891 * are used.
10892 */
10893VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10894{
10895 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10896 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10897
10898 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10899 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10900 Assert(!pVCpu->iem.s.cActiveMappings);
10901 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10902}
10903
10904
10905/**
10906 * Interface for HM and EM to emulate the MWAIT instruction.
10907 *
10908 * @returns Strict VBox status code.
10909 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10910 *
10911 * @param pVCpu The cross context virtual CPU structure.
10912 * @param cbInstr The instruction length in bytes.
10913 *
10914 * @remarks Not all of the state needs to be synced in.
10915 */
10916VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10917{
10918 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10919 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10920
10921 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10922 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10923 Assert(!pVCpu->iem.s.cActiveMappings);
10924 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10925}
10926
10927
10928/**
10929 * Interface for HM and EM to emulate the HLT instruction.
10930 *
10931 * @returns Strict VBox status code.
10932 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10933 *
10934 * @param pVCpu The cross context virtual CPU structure.
10935 * @param cbInstr The instruction length in bytes.
10936 *
10937 * @remarks Not all of the state needs to be synced in.
10938 */
10939VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10940{
10941 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10942
10943 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10944 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10945 Assert(!pVCpu->iem.s.cActiveMappings);
10946 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10947}
10948
10949
10950/**
10951 * Checks if IEM is in the process of delivering an event (interrupt or
10952 * exception).
10953 *
10954 * @returns true if we're in the process of raising an interrupt or exception,
10955 * false otherwise.
10956 * @param pVCpu The cross context virtual CPU structure.
10957 * @param puVector Where to store the vector associated with the
10958 * currently delivered event, optional.
10959 * @param pfFlags Where to store the event delivery flags (see
10960 * IEM_XCPT_FLAGS_XXX), optional.
10961 * @param puErr Where to store the error code associated with the
10962 * event, optional.
10963 * @param puCr2 Where to store the CR2 associated with the event,
10964 * optional.
10965 * @remarks The caller should check the flags to determine if the error code and
10966 * CR2 are valid for the event.
10967 */
10968VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10969{
10970 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10971 if (fRaisingXcpt)
10972 {
10973 if (puVector)
10974 *puVector = pVCpu->iem.s.uCurXcpt;
10975 if (pfFlags)
10976 *pfFlags = pVCpu->iem.s.fCurXcpt;
10977 if (puErr)
10978 *puErr = pVCpu->iem.s.uCurXcptErr;
10979 if (puCr2)
10980 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10981 }
10982 return fRaisingXcpt;
10983}
10984
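/**
 * Minimal usage sketch for IEMGetCurrentXcpt.
 *
 * Queries the event IEM is currently delivering, if any; the log statements
 * and the calling context are illustrative assumptions.
 */
static void iemExampleCurrentXcptUsage(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("iemExampleCurrentXcptUsage: vector=%#x fFlags=%#x uErr=%#x uCr2=%RX64\n", uVector, fFlags, uErr, uCr2));
    else
        Log(("iemExampleCurrentXcptUsage: no event being delivered\n"));
}
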
10985#ifdef IN_RING3
10986
10987/**
10988 * Handles the unlikely and probably fatal merge cases.
10989 *
10990 * @returns Merged status code.
10991 * @param rcStrict Current EM status code.
10992 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10993 * with @a rcStrict.
10994 * @param iMemMap The memory mapping index. For error reporting only.
10995 * @param pVCpu The cross context virtual CPU structure of the calling
10996 * thread, for error reporting only.
10997 */
10998DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10999 unsigned iMemMap, PVMCPUCC pVCpu)
11000{
11001 if (RT_FAILURE_NP(rcStrict))
11002 return rcStrict;
11003
11004 if (RT_FAILURE_NP(rcStrictCommit))
11005 return rcStrictCommit;
11006
11007 if (rcStrict == rcStrictCommit)
11008 return rcStrictCommit;
11009
11010 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11011 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11012 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11013 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11014 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11015 return VERR_IOM_FF_STATUS_IPE;
11016}
11017
11018
11019/**
11020 * Helper for IOMR3ProcessForceFlag.
11021 *
11022 * @returns Merged status code.
11023 * @param rcStrict Current EM status code.
11024 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11025 * with @a rcStrict.
11026 * @param iMemMap The memory mapping index. For error reporting only.
11027 * @param pVCpu The cross context virtual CPU structure of the calling
11028 * thread, for error reporting only.
11029 */
11030DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11031{
11032 /* Simple. */
11033 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11034 return rcStrictCommit;
11035
11036 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11037 return rcStrict;
11038
11039 /* EM scheduling status codes. */
11040 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11041 && rcStrict <= VINF_EM_LAST))
11042 {
11043 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11044 && rcStrictCommit <= VINF_EM_LAST))
11045 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11046 }
11047
11048 /* Unlikely */
11049 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11050}
11051
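/**
 * Illustration of the iemR3MergeStatus early-out behaviour above.
 *
 * A sketch only: with rcStrict being VINF_SUCCESS or VINF_EM_RAW_TO_R3 the
 * commit status wins outright, otherwise the numerically smaller (higher
 * priority) EM scheduling status is kept.  The concrete input pair is an
 * illustrative assumption.
 */
static VBOXSTRICTRC iemExampleMergeStatusUsage(PVMCPUCC pVCpu)
{
    /* Pending VINF_EM_RAW_TO_R3 from execution merged with a VINF_SUCCESS commit:
       the first early-out above returns the commit status. */
    return iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS, 0 /*iMemMap*/, pVCpu);
}
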
11052
11053/**
11054 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11055 *
11056 * @returns Merge between @a rcStrict and what the commit operation returned.
11057 * @param pVM The cross context VM structure.
11058 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11059 * @param rcStrict The status code returned by ring-0 or raw-mode.
11060 */
11061VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11062{
11063 /*
11064 * Reset the pending commit.
11065 */
11066 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11067 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11068 ("%#x %#x %#x\n",
11069 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11070 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11071
11072 /*
11073 * Commit the pending bounce buffers (usually just one).
11074 */
11075 unsigned cBufs = 0;
11076 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11077 while (iMemMap-- > 0)
11078 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11079 {
11080 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11081 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11082 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11083
11084 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11085 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11086 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11087
11088 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11089 {
11090 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11091 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11092 pbBuf,
11093 cbFirst,
11094 PGMACCESSORIGIN_IEM);
11095 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11096 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11097 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11098 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11099 }
11100
11101 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11102 {
11103 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11104 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11105 pbBuf + cbFirst,
11106 cbSecond,
11107 PGMACCESSORIGIN_IEM);
11108 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11109 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11110 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11111 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11112 }
11113 cBufs++;
11114 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11115 }
11116
11117 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11118 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11119 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11120 pVCpu->iem.s.cActiveMappings = 0;
11121 return rcStrict;
11122}
11123
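/**
 * Minimal usage sketch for IEMR3ProcessForceFlag.
 *
 * Shows the force-flag check a ring-3 caller would perform before handing the
 * pending write commits to the function above; the surrounding loop and status
 * handling in the real EM code are more involved.
 */
static VBOXSTRICTRC iemExampleProcessForceFlagUsage(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
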
11124#endif /* IN_RING3 */
11125